Merge pull request #168 from pommedeterresautee/master

xgboost simplified documentation + dump function performance optimization (for big models)
Tianqi Chen 2015-02-09 09:05:57 -08:00
commit a3cf30592f
7 changed files with 133 additions and 34 deletions

R-package/NAMESPACE

@@ -26,6 +26,7 @@ importFrom(data.table,":=")
importFrom(data.table,as.data.table)
importFrom(data.table,copy)
importFrom(data.table,data.table)
+importFrom(data.table,fread)
importFrom(data.table,rbindlist)
importFrom(data.table,set)
importFrom(data.table,setnames)
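The new fread import backs the xgb.dump rewrite below: instead of splitting one large string with stringr, the whole dump is parsed line-wise by data.table::fread, which scales much better for big models. A standalone sketch of the idea (the two-tree dump string here is hypothetical; in the real code it comes from the XGBoosterDumpModel_R call):

library(data.table)
library(stringr)

# Hypothetical dump fragment standing in for the output of XGBoosterDumpModel_R
longString <- c("booster[0]\n0:[f28<0.5] yes=1,no=2\n\t1:leaf=0.4\n\t2:leaf=-0.4\n",
                "booster[1]\n0:leaf=-0.1\n")
dt <- fread(paste(longString, collapse = ""), sep = "\n", header = FALSE)  # one row per line
setnames(dt, "Content")
dt[, Content := str_replace(Content, "^\t+", "")]  # strip leading tabs, by reference
print(dt)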

R-package/R/xgb.dump.R

@@ -3,8 +3,10 @@
#' Save a xgboost model to text file. Could be parsed later.
#'
#' @importFrom magrittr %>%
-#' @importFrom stringr str_split
#' @importFrom stringr str_replace
+#' @importFrom data.table fread
+#' @importFrom data.table :=
+#' @importFrom data.table setnames
#' @param model the model object.
#' @param fname the name of the text file where to save the model text dump. If not provided or set to \code{NULL} the function will return the model as a \code{character} vector.
#' @param fmap feature map file representing the type of feature.
@@ -46,12 +48,17 @@ xgb.dump <- function(model = NULL, fname = NULL, fmap = "", with.stats=FALSE) {
    stop("fmap: argument must be type character (when provided)")
  }
-  result <- .Call("XGBoosterDumpModel_R", model, fmap, as.integer(with.stats), PACKAGE = "xgboost")
+  longString <- .Call("XGBoosterDumpModel_R", model, fmap, as.integer(with.stats), PACKAGE = "xgboost")
+  dt <- fread(paste(longString, collapse = ""), sep = "\n", header = F)
+  setnames(dt, "Content")
  if(is.null(fname)) {
-    return(str_split(result, "\n") %>% unlist %>% str_replace("^\t+","") %>% Filter(function(x) x != "", .))
+    result <- dt[Content != "0"][,Content := str_replace(Content, "^\t+", "")][Content != ""][,paste(Content)]
+    return(result)
  } else {
-    result %>% str_split("\n") %>% unlist %>% Filter(function(x) x != "", .) %>% writeLines(fname)
+    result <- dt[Content != "0"][Content != ""][,paste(Content)] %>% writeLines(fname)
    return(TRUE)
  }
}
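A usage sketch of the reworked function covering both return paths; the model fitting follows the agaricus example used elsewhere in the package docs:

library(xgboost)

data(agaricus.train, package = "xgboost")
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 2, eta = 1, nrounds = 2, objective = "binary:logistic")

dump <- xgb.dump(bst, with.stats = TRUE)  # fname = NULL: returns a character vector
head(dump)
xgb.dump(bst, fname = "xgb.model.dump")   # fname given: writes the file, returns TRUE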

R-package/R/xgb.importance.R

@@ -73,7 +73,7 @@ xgb.importance <- function(feature_names = NULL, filename_dump = NULL, model = N
  }
  treeDump <- function(feature_names, text){
-    result <- xgb.model.dt.tree(feature_names = feature_names, text = text)[Feature!="Leaf",.(Gain = sum(Quality), Cover = sum(Cover), Frequence = .N), by = Feature][,`:=`(Gain=Gain/sum(Gain),Cover=Cover/sum(Cover),Frequence=Frequence/sum(Frequence))][order(-Gain)]
+    result <- xgb.model.dt.tree(feature_names = feature_names, text = text)[Feature!="Leaf",.(Gain = sum(Quality), Cover = sum(Cover), Frequence = .N), by = Feature][,`:=`(Gain = Gain/sum(Gain), Cover = Cover/sum(Cover), Frequence = Frequence/sum(Frequence))]
    result
  }
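For reference, a sketch of the importance call this helper serves, reusing bst from the sketch above; passing the model directly (rather than a dump file) is an assumption based on the truncated signature shown in the hunk header:

imp <- xgb.importance(feature_names = colnames(agaricus.train$data), model = bst)
head(imp)  # Gain, Cover and Frequence are each normalised to sum to 1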

R-package/R/xgb.model.dt.tree.R

@@ -37,6 +37,7 @@
#' \item \code{Quality}: it's the gain related to the split in this specific node ;
#' \item \code{Cover}: metric to measure the number of observation affected by the split ;
#' \item \code{Tree}: ID of the tree. It is included in the main ID ;
+#' \item \code{Yes.X} or \code{No.X}: data related to the pointer in \code{Yes} or \code{No} column ;
#' }
#'
#' @examples
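To inspect these columns, one can feed a dump back in through the text argument, as the treeDump helper above does; a sketch reusing bst from the earlier sketch:

dt.tree <- xgb.model.dt.tree(feature_names = colnames(agaricus.train$data),
                             text = xgb.dump(bst, with.stats = TRUE))
head(dt.tree)  # includes the new Yes.X / No.X columns described above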

R-package/R/xgboost.R

@@ -6,33 +6,78 @@
#' \code{xgb.DMatrix}.
#' @param label the response variable. User should not set this field,
#' if data is local data file or \code{xgb.DMatrix}.
-#' @param params the list of parameters. Commonly used ones are:
+#' @param params the list of parameters.
+#'
+#' 1. General Parameters
+#'
#' \itemize{
-#' \item \code{objective} objective function, common ones are
-#' \itemize{
-#' \item \code{reg:linear} linear regression
-#' \item \code{binary:logistic} logistic regression for classification
-#' }
-#' \item \code{eta} step size of each boosting step
-#' \item \code{max.depth} maximum depth of the tree
-#' \item \code{nthread} number of thread used in training, if not set, all threads are used
+#' \item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}
+#' \item \code{silent} 0 means printing running messages, 1 means silent mode. Default: 0
+#' }
+#'
+#' 2. Booster Parameters
+#'
+#' 2.1. Parameters for the Tree Booster
+#'
+#' \itemize{
+#' \item \code{eta} step size shrinkage used in update to prevent overfitting. After each boosting step, we can directly get the weights of new features, and eta shrinks the feature weights to make the boosting process more conservative. Default: 0.3
+#' \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. The larger, the more conservative the algorithm will be.
+#' \item \code{max_depth} maximum depth of a tree. Default: 6
+#' \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to the minimum number of instances needed in each node. The larger, the more conservative the algorithm will be. Default: 1
+#' \item \code{subsample} subsample ratio of the training instances. Setting it to 0.5 means that xgboost randomly collects half of the data instances to grow trees, which prevents overfitting. Default: 1
+#' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
+#' }
+#'
+#' 2.2. Parameters for the Linear Booster
+#'
+#' \itemize{
+#' \item \code{lambda} L2 regularization term on weights. Default: 0
+#' \item \code{lambda_bias} L2 regularization term on bias. Default: 0
+#' \item \code{alpha} L1 regularization term on weights (there is no L1 regularization on bias because it is not important). Default: 0
+#' }
+#'
+#' 3. Task Parameters
+#'
+#' \itemize{
+#' \item \code{objective} specify the learning task and the corresponding learning objective; the options are listed below:
+#' \itemize{
+#' \item \code{reg:linear} linear regression (Default).
+#' \item \code{reg:logistic} logistic regression.
+#' \item \code{binary:logistic} logistic regression for binary classification. Outputs a probability.
+#' \item \code{binary:logitraw} logistic regression for binary classification, outputs the score before logistic transformation.
+#' \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective; you also need to set num_class (the number of classes).
+#' \item \code{multi:softprob} same as softmax, but outputs a vector of ndata * nclass, which can be further reshaped to an ndata x nclass matrix. The result contains the predicted probability of each data point belonging to each class.
+#' \item \code{rank:pairwise} set xgboost to do a ranking task by minimizing the pairwise loss.
+#' }
+#' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
+#' \item \code{eval_metric} evaluation metrics for validation data. Default: a metric is assigned according to the objective (rmse for regression, error for classification, mean average precision for ranking). The full list is provided in the details section.
#' }
#'
-#' See \url{https://github.com/tqchen/xgboost/wiki/Parameters} for
-#' further details. See also demo/ for walkthrough example in R.
#' @param nrounds the max number of iterations
#' @param verbose If 0, xgboost will stay silent. If 1, xgboost will print
#' information of performance. If 2, xgboost will print information of both
#' performance and construction progress information
#' @param missing Missing is only used when input is dense matrix, pick a float
-#' value that represents missing value. Sometime a data use 0 or other extreme value to represents missing values.
+#' value that represents missing value. Sometimes a dataset uses 0 or another extreme value to represent missing values.
#' @param ... other parameters to pass to \code{params}.
#'
#' @details
#' This is the modeling function for xgboost.
#'
#' Parallelization is automatically enabled if OpenMP is present.
-#' Number of threads can also be manually specified via "nthread" parameter
+#' Number of threads can also be manually specified via the "nthread" parameter.
+#'
+#' \code{eval_metric} is set automatically by xgboost but can be overridden by passing it as a parameter. The list of metrics optimized by xgboost is provided below to help you understand how it works internally; it should not be overridden unless you have a real reason to do so.
+#' \itemize{
+#' \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
+#' \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
+#' \item \code{error} Binary classification error rate. It is calculated as \code{(wrong cases) / (all cases)}. For the predictions, the evaluation will regard instances with a prediction value larger than 0.5 as positive instances, and the others as negative instances.
+#' \item \code{merror} Multiclass classification error rate. It is calculated as \code{(wrong cases) / (all cases)}.
+#' \item \code{auc} Area under the curve, for ranking evaluation. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve}
+#' \item \code{ndcg} Normalized Discounted Cumulative Gain. \url{http://en.wikipedia.org/wiki/NDCG}
+#' }
+#'
+#' More parameters are available in the Wiki: \url{https://github.com/tqchen/xgboost/wiki/Parameters}.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
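A sketch tying the three documented parameter groups together in one call, again on the agaricus data; eval_metric = "logloss" deliberately overrides the default metric for a classification objective:

param <- list(booster = "gbtree",             # 1. general parameter
              eta = 0.3, max_depth = 6,       # 2.1. tree booster parameters
              objective = "binary:logistic",  # 3. task parameter
              eval_metric = "logloss")        # overrides the default metric (error)
bst.param <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
                     params = param, nrounds = 2, verbose = 1)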

R-package/man/xgb.model.dt.tree.Rd

@@ -39,6 +39,7 @@ The content of the \code{data.table} is organised that way:
\item \code{Quality}: it's the gain related to the split in this specific node ;
\item \code{Cover}: metric to measure the number of observation affected by the split ;
\item \code{Tree}: ID of the tree. It is included in the main ID ;
+\item \code{Yes.X} or \code{No.X}: data related to the pointer in \code{Yes} or \code{No} column ;
}
}
\examples{

R-package/man/xgboost.Rd

@@ -15,22 +15,54 @@ xgboost(data = NULL, label = NULL, missing = NULL, params = list(),
if data is local data file or \code{xgb.DMatrix}.}

\item{missing}{Missing is only used when input is dense matrix, pick a float
-value that represents missing value. Sometime a data use 0 or other extreme value to represents missing values.}
+value that represents missing value. Sometimes a dataset uses 0 or another extreme value to represent missing values.}

-\item{params}{the list of parameters. Commonly used ones are:
+\item{params}{the list of parameters.
+
+1. General Parameters
+
\itemize{
-\item \code{objective} objective function, common ones are
-\itemize{
-\item \code{reg:linear} linear regression
-\item \code{binary:logistic} logistic regression for classification
-}
-\item \code{eta} step size of each boosting step
-\item \code{max.depth} maximum depth of the tree
-\item \code{nthread} number of thread used in training, if not set, all threads are used
+\item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}
+\item \code{silent} 0 means printing running messages, 1 means silent mode. Default: 0
}
-See \url{https://github.com/tqchen/xgboost/wiki/Parameters} for
-further details. See also demo/ for walkthrough example in R.}
+
+2. Booster Parameters
+
+2.1. Parameters for the Tree Booster
+
+\itemize{
+\item \code{eta} step size shrinkage used in update to prevent overfitting. After each boosting step, we can directly get the weights of new features, and eta shrinks the feature weights to make the boosting process more conservative. Default: 0.3
+\item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. The larger, the more conservative the algorithm will be.
+\item \code{max_depth} maximum depth of a tree. Default: 6
+\item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to the minimum number of instances needed in each node. The larger, the more conservative the algorithm will be. Default: 1
+\item \code{subsample} subsample ratio of the training instances. Setting it to 0.5 means that xgboost randomly collects half of the data instances to grow trees, which prevents overfitting. Default: 1
+\item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
+}
+
+2.2. Parameters for the Linear Booster
+
+\itemize{
+\item \code{lambda} L2 regularization term on weights. Default: 0
+\item \code{lambda_bias} L2 regularization term on bias. Default: 0
+\item \code{alpha} L1 regularization term on weights (there is no L1 regularization on bias because it is not important). Default: 0
+}
+
+3. Task Parameters
+
+\itemize{
+\item \code{objective} specify the learning task and the corresponding learning objective; the options are listed below:
+\itemize{
+\item \code{reg:linear} linear regression (Default).
+\item \code{reg:logistic} logistic regression.
+\item \code{binary:logistic} logistic regression for binary classification. Outputs a probability.
+\item \code{binary:logitraw} logistic regression for binary classification, outputs the score before logistic transformation.
+\item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective; you also need to set num_class (the number of classes).
+\item \code{multi:softprob} same as softmax, but outputs a vector of ndata * nclass, which can be further reshaped to an ndata x nclass matrix. The result contains the predicted probability of each data point belonging to each class.
+\item \code{rank:pairwise} set xgboost to do a ranking task by minimizing the pairwise loss.
+}
+\item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
+\item \code{eval_metric} evaluation metrics for validation data. Default: a metric is assigned according to the objective (rmse for regression, error for classification, mean average precision for ranking). The full list is provided in the details section.
+}}

\item{nrounds}{the max number of iterations}
@@ -47,7 +79,19 @@ A simple interface for xgboost in R
This is the modeling function for xgboost.

Parallelization is automatically enabled if OpenMP is present.
-Number of threads can also be manually specified via "nthread" parameter
+Number of threads can also be manually specified via the "nthread" parameter.
+
+\code{eval_metric} is set automatically by xgboost but can be overridden by passing it as a parameter. The list of metrics optimized by xgboost is provided below to help you understand how it works internally; it should not be overridden unless you have a real reason to do so.
+\itemize{
+\item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
+\item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
+\item \code{error} Binary classification error rate. It is calculated as \code{(wrong cases) / (all cases)}. For the predictions, the evaluation will regard instances with a prediction value larger than 0.5 as positive instances, and the others as negative instances.
+\item \code{merror} Multiclass classification error rate. It is calculated as \code{(wrong cases) / (all cases)}.
+\item \code{auc} Area under the curve, for ranking evaluation. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve}
+\item \code{ndcg} Normalized Discounted Cumulative Gain. \url{http://en.wikipedia.org/wiki/NDCG}
+}
+
+More parameters are available in the Wiki: \url{https://github.com/tqchen/xgboost/wiki/Parameters}.
}
\examples{
data(agaricus.train, package='xgboost')