From 9b6a14a99d685f0c313197f1b65c945d22627d7d Mon Sep 17 00:00:00 2001
From: El Potaeto
Date: Mon, 29 Dec 2014 23:56:31 +0100
Subject: [PATCH] regeneration of documentation

---
 R-package/man/xgb.dump.Rd       | 17 +++++++++++------
 R-package/man/xgb.importance.Rd | 19 ++++++++++++-------
 2 files changed, 23 insertions(+), 13 deletions(-)

diff --git a/R-package/man/xgb.dump.Rd b/R-package/man/xgb.dump.Rd
index 9be2696b9..bcecc6abd 100644
--- a/R-package/man/xgb.dump.Rd
+++ b/R-package/man/xgb.dump.Rd
@@ -4,7 +4,7 @@
 \alias{xgb.dump}
 \title{Save xgboost model to text file}
 \usage{
-xgb.dump(model, fname, fmap = "")
+xgb.dump(model, fname, fmap = "", with.stats = FALSE)
 }
 \arguments{
 \item{model}{the model object.}
@@ -12,11 +12,16 @@ xgb.dump(model, fname, fmap = "")
 \item{fname}{the name of the binary file.}
 
 \item{fmap}{feature map file representing the type of feature.
-  Detailed description could be found at
-  \url{https://github.com/tqchen/xgboost/wiki/Binary-Classification#dump-model}.
-  See demo/ for walkthrough example in R, and
-  \url{https://github.com/tqchen/xgboost/blob/master/demo/data/featmap.txt}
-  for example Format.}
+Detailed description could be found at
+\url{https://github.com/tqchen/xgboost/wiki/Binary-Classification#dump-model}.
+See demo/ for walkthrough example in R, and
+\url{https://github.com/tqchen/xgboost/blob/master/demo/data/featmap.txt}
+for example Format.}
+
+\item{with.stats}{whether to dump statistics of the splits.
+  When this option is on, the model dump comes with two additional statistics:
+  gain is the approximate loss function gain we get in each split;
+  cover is the sum of the second order gradient in each node.}
 }
 \description{
 Save a xgboost model to text file. Could be parsed later.
diff --git a/R-package/man/xgb.importance.Rd b/R-package/man/xgb.importance.Rd
index 9609fe82f..7d02315f9 100644
--- a/R-package/man/xgb.importance.Rd
+++ b/R-package/man/xgb.importance.Rd
@@ -4,30 +4,35 @@
 \alias{xgb.importance}
 \title{Show importance of features in a model}
 \usage{
-xgb.importance(feature_names, filename_dump)
+xgb.importance(feature_names = NULL, filename_dump = NULL)
 }
 \arguments{
-\item{feature_names}{names of each feature as a character vector. Can be extracted from a sparse matrix.}
+\item{feature_names}{names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If the model dump already contains feature names, this argument should be \code{NULL}.}
 
-\item{filename_dump}{the name of the text file.}
+\item{filename_dump}{the path to the text file storing the model.}
 }
 \description{
-Read a xgboost model in text file format. Return a data.table of the features with their weight.
+Read a xgboost model in text file format.
+Can be a tree or linear model (text dumps of linear models are only supported in the dev version of Xgboost for now).
+}
+\details{
+Returns a data.table of the features with their weight.
+#'
 }
 \examples{
 data(agaricus.train, package='xgboost')
 data(agaricus.test, package='xgboost')
 
-#Both dataset are list with two items, a sparse matrix and labels (outcome column which will be learned).
+#Both datasets are lists with two items, a sparse matrix and labels (labels = the outcome column which will be learned).
 #Each column of the sparse Matrix is a feature in one hot encoding format.
 
 train <- agaricus.train
 test <- agaricus.test
 
 bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
                eta = 1, nround = 2,objective = "binary:logistic")
-xgb.dump(bst, 'xgb.model.dump')
+xgb.dump(bst, 'xgb.model.dump', with.stats = T)
 
-#agaricus.test$data@Dimnames[[2]] represents the column name of the sparse matrix.
+#agaricus.test$data@Dimnames[[2]] represents the column names of the sparse matrix.
 xgb.importance(agaricus.test$data@Dimnames[[2]], 'xgb.model.dump')
 }
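
As a quick illustration of the two functions touched by this patch, the sketch below is a minimal end-to-end R session: it trains the same small agaricus model as the documented example, writes the text dump with split statistics enabled, and reads it back as a feature importance table. It assumes the xgboost R package API as documented above (with.stats in xgb.dump, file-based xgb.importance); the file name 'xgb.model.dump' and the variable name `importance` are only illustrative.

library(xgboost)

data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')

# Train a small boosted-tree model on the bundled mushroom data
# (same settings as the example in xgb.importance.Rd).
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max.depth = 2, eta = 1, nround = 2,
               objective = "binary:logistic")

# with.stats = TRUE adds the gain and cover statistics described in the
# new with.stats documentation to each split of the text dump.
xgb.dump(bst, 'xgb.model.dump', with.stats = TRUE)

# The column names of the sparse matrix supply the feature names; the
# result is a data.table of features with their weight.
importance <- xgb.importance(agaricus.test$data@Dimnames[[2]], 'xgb.model.dump')
print(importance)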