diff --git a/R-package/R/predict.xgb.Booster.R b/R-package/R/predict.xgb.Booster.R
index 122e116c7..49f1ad4f0 100644
--- a/R-package/R/predict.xgb.Booster.R
+++ b/R-package/R/predict.xgb.Booster.R
@@ -7,6 +7,8 @@ setClass("xgb.Booster")
 #' @param object Object of class "xgb.Boost"
 #' @param newdata takes \code{matrix}, \code{dgCMatrix}, local data file or
 #' \code{xgb.DMatrix}.
+#' @param missing Missing is only used when input is dense matrix, pick a float
+#' value that represents missing value. Sometimes a dataset uses 0 or another extreme value to represent missing values.
 #' @param outputmargin whether the prediction should be shown in the original
 #' value of sum of functions, when outputmargin=TRUE, the prediction is
 #' untransformed margin value. In logistic regression, outputmargin=T will
diff --git a/R-package/R/xgb.cv.R b/R-package/R/xgb.cv.R
index 0aae574fb..c60d9d96f 100644
--- a/R-package/R/xgb.cv.R
+++ b/R-package/R/xgb.cv.R
@@ -32,7 +32,7 @@
 #' @param nfold number of folds used
 #' @param label option field, when data is Matrix
 #' @param missing Missing is only used when input is dense matrix, pick a float
-#' value that represents missing value. Sometime a data use 0 or other extreme value to represents missing values.
+#' value that represents missing value. Sometimes a dataset uses 0 or another extreme value to represent missing values.
 #' @param prediction A logical value indicating whether to return the prediction vector.
 #' @param showsd \code{boolean}, whether show standard deviation of cross validation
 #' @param metrics, list of evaluation metrics to be used in corss validation,
@@ -50,8 +50,6 @@
 #' @param feval custimized evaluation function. Returns
 #' \code{list(metric='metric-name', value='metric-value')} with given
 #' prediction and dtrain,
-#' @param missing Missing is only used when input is dense matrix, pick a float
-# value that represents missing value. Sometime a data use 0 or other extreme value to represents missing values.
 #' @param verbose \code{boolean}, print the statistics during the process.
 #' @param ... other parameters to pass to \code{params}.
 #'
diff --git a/R-package/R/xgb.importance.R b/R-package/R/xgb.importance.R
index c2688848b..3b7f69401 100644
--- a/R-package/R/xgb.importance.R
+++ b/R-package/R/xgb.importance.R
@@ -33,7 +33,7 @@
 #' data(agaricus.test, package='xgboost')
 #'
 #' #Both dataset are list with two items, a sparse matrix and labels
-#' (labels = outcome column which will be learned).
+#' #(labels = outcome column which will be learned).
 #' #Each column of the sparse Matrix is a feature in one hot encoding format.
 #' train <- agaricus.train
 #' test <- agaricus.test
diff --git a/R-package/R/xgb.model.dt.tree.R b/R-package/R/xgb.model.dt.tree.R
index b67597126..e77208720 100644
--- a/R-package/R/xgb.model.dt.tree.R
+++ b/R-package/R/xgb.model.dt.tree.R
@@ -43,7 +43,7 @@
 #' data(agaricus.train, package='xgboost')
 #'
 #' #Both dataset are list with two items, a sparse matrix and labels
-#' (labels = outcome column which will be learned).
+#' #(labels = outcome column which will be learned).
 #' #Each column of the sparse Matrix is a feature in one hot encoding format.
 #' train <- agaricus.train
 #'
diff --git a/R-package/R/xgb.plot.tree.R b/R-package/R/xgb.plot.tree.R
index 443446916..b5f7a959e 100644
--- a/R-package/R/xgb.plot.tree.R
+++ b/R-package/R/xgb.plot.tree.R
@@ -43,7 +43,7 @@
 #' data(agaricus.train, package='xgboost')
 #'
 #' #Both dataset are list with two items, a sparse matrix and labels
-#' (labels = outcome column which will be learned).
+#' #(labels = outcome column which will be learned).
 #' #Each column of the sparse Matrix is a feature in one hot encoding format.
 #' train <- agaricus.train
 #'
diff --git a/R-package/man/predict-xgb.Booster-method.Rd b/R-package/man/predict-xgb.Booster-method.Rd
index afa0c70a5..d8da7975e 100644
--- a/R-package/man/predict-xgb.Booster-method.Rd
+++ b/R-package/man/predict-xgb.Booster-method.Rd
@@ -6,7 +6,7 @@
 \title{Predict method for eXtreme Gradient Boosting model}
 \usage{
 \S4method{predict}{xgb.Booster}(object, newdata, missing = NULL,
-  outputmargin = FALSE, ntreelimit = NULL)
+  outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE)
 }
 \arguments{
 \item{object}{Object of class "xgb.Boost"}
@@ -14,6 +14,8 @@
 \item{newdata}{takes \code{matrix}, \code{dgCMatrix}, local data file or
 \code{xgb.DMatrix}.}
 
+\item{missing}{Missing is only used when input is dense matrix, pick a float value that represents missing value. Sometimes a dataset uses 0 or another extreme value to represent missing values.}
+
 \item{outputmargin}{whether the prediction should be shown in the original
 value of sum of functions, when outputmargin=TRUE, the prediction is
 untransformed margin value. In logistic regression, outputmargin=T will
@@ -22,6 +24,8 @@
 output value before logistic transformation.}
 
 \item{ntreelimit}{limit number of trees used in prediction, this parameter is only valid for gbtree, but not for gblinear. set it to be value bigger than 0. It will use all trees by default.}
+
+\item{predleaf}{whether predict leaf index instead. If set to TRUE, the output will be a matrix object.}
 }
 \description{
 Predicted values based on xgboost model object.
diff --git a/R-package/man/xgb.cv.Rd b/R-package/man/xgb.cv.Rd
index 7ba5eb727..0867134ae 100644
--- a/R-package/man/xgb.cv.Rd
+++ b/R-package/man/xgb.cv.Rd
@@ -5,8 +5,8 @@
 \title{Cross Validation}
 \usage{
 xgb.cv(params = list(), data, nrounds, nfold, label = NULL,
-  missing = NULL, showsd = TRUE, metrics = list(), obj = NULL,
-  feval = NULL, verbose = T, ...)
+  missing = NULL, prediction = FALSE, showsd = TRUE, metrics = list(),
+  obj = NULL, feval = NULL, verbose = T, ...)
 }
 \arguments{
 \item{params}{the list of parameters.
 Commonly used ones are:
@@ -34,6 +34,8 @@ xgb.cv(params = list(), data, nrounds, nfold, label = NULL,
 
 \item{missing}{Missing is only used when input is dense matrix, pick a float}
 
+\item{prediction}{A logical value indicating whether to return the prediction vector.}
+
 \item{showsd}{\code{boolean}, whether show standard deviation of cross validation}
 
 \item{metrics,}{list of evaluation metrics to be used in corss validation,
diff --git a/R-package/man/xgb.dump.Rd b/R-package/man/xgb.dump.Rd
index 473227357..7958a72e8 100644
--- a/R-package/man/xgb.dump.Rd
+++ b/R-package/man/xgb.dump.Rd
@@ -37,7 +37,7 @@ test <- agaricus.test
 bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
                eta = 1, nround = 2,objective = "binary:logistic")
 # save the model in file 'xgb.model.dump'
-xgb.dump(bst, 'xgb.model.dump', with.stats = T)
+xgb.dump(bst, 'xgb.model.dump', with.stats = TRUE)
 # print the model without saving it to a file
 print(xgb.dump(bst))
 
diff --git a/R-package/man/xgb.importance.Rd b/R-package/man/xgb.importance.Rd
index c173b1e8e..1b2946729 100644
--- a/R-package/man/xgb.importance.Rd
+++ b/R-package/man/xgb.importance.Rd
@@ -38,7 +38,8 @@ There are 3 columns :
 data(agaricus.train, package='xgboost')
 data(agaricus.test, package='xgboost')
 
-#Both dataset are list with two items, a sparse matrix and labels (labels = outcome column which will be learned).
+#Both dataset are list with two items, a sparse matrix and labels
+#(labels = outcome column which will be learned).
 #Each column of the sparse Matrix is a feature in one hot encoding format.
 train <- agaricus.train
 test <- agaricus.test
diff --git a/R-package/man/xgb.model.dt.tree.Rd b/R-package/man/xgb.model.dt.tree.Rd
index fb5bd94bd..0030fb14d 100644
--- a/R-package/man/xgb.model.dt.tree.Rd
+++ b/R-package/man/xgb.model.dt.tree.Rd
@@ -44,7 +44,8 @@ The content of the \code{data.table} is organised that way:
 \examples{
 data(agaricus.train, package='xgboost')
 
-#Both dataset are list with two items, a sparse matrix and labels (labels = outcome column which will be learned).
+#Both dataset are list with two items, a sparse matrix and labels
+#(labels = outcome column which will be learned).
 #Each column of the sparse Matrix is a feature in one hot encoding format.
 
 train <- agaricus.train
diff --git a/R-package/man/xgb.plot.tree.Rd b/R-package/man/xgb.plot.tree.Rd
index ce69d4431..8aec827ec 100644
--- a/R-package/man/xgb.plot.tree.Rd
+++ b/R-package/man/xgb.plot.tree.Rd
@@ -5,7 +5,7 @@
 \title{Plot a boosted tree model}
 \usage{
 xgb.plot.tree(feature_names = NULL, filename_dump = NULL, model = NULL,
-  n_first_tree = NULL, CSSstyle = NULL)
+  n_first_tree = NULL, CSSstyle = NULL, width = NULL, height = NULL)
 }
 \arguments{
 \item{feature_names}{names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
@@ -17,6 +17,10 @@ xgb.plot.tree(feature_names = NULL, filename_dump = NULL, model = NULL,
 \item{n_first_tree}{limit the plot to the n first trees. If \code{NULL}, all trees of the model are plotted. Performance can be low for huge models.}
 
 \item{CSSstyle}{a \code{character} vector storing a css style to customize the appearance of nodes. Look at the \href{https://github.com/knsv/mermaid/wiki}{Mermaid wiki} for more information.}
+
+\item{width}{the width of the diagram in pixels.}
+
+\item{height}{the height of the diagram in pixels.}
 }
 \value{
 A \code{DiagrammeR} of the model.
@@ -40,7 +44,8 @@ It uses \href{https://github.com/knsv/mermaid/}{Mermaid} library for that purpos
 \examples{
 data(agaricus.train, package='xgboost')
 
-#Both dataset are list with two items, a sparse matrix and labels (labels = outcome column which will be learned).
+#Both dataset are list with two items, a sparse matrix and labels
+#(labels = outcome column which will be learned).
 #Each column of the sparse Matrix is a feature in one hot encoding format.
 
 train <- agaricus.train