From 0052b193cf47a2482e209dad8b90c41393b3f85f Mon Sep 17 00:00:00 2001
From: unknown
Date: Sat, 7 Nov 2015 21:01:28 +0100
Subject: [PATCH] Update lib version dependencies (for DiagrammeR mainly)
 Fix @export tag in each R file (for Roxygen 5, otherwise it doesn't work anymore)
 Regenerate Roxygen doc
---
 R-package/DESCRIPTION | 23 +++++-----
 R-package/NAMESPACE | 6 ++-
 R-package/R/getinfo.xgb.DMatrix.R | 1 -
 R-package/R/predict.xgb.Booster.R | 1 -
 R-package/R/setinfo.xgb.DMatrix.R | 1 -
 R-package/R/slice.xgb.DMatrix.R | 1 -
 R-package/R/xgb.DMatrix.R | 1 -
 R-package/R/xgb.DMatrix.save.R | 1 -
 R-package/R/xgb.cv.R | 1 -
 R-package/R/xgb.dump.R | 1 -
 R-package/R/xgb.load.R | 1 -
 R-package/R/xgb.save.R | 1 -
 R-package/R/xgb.save.raw.R | 1 -
 R-package/R/xgb.train.R | 1 -
 R-package/R/xgboost.R | 1 -
 R-package/man/agaricus.test.Rd | 8 ++--
 R-package/man/agaricus.train.Rd | 8 ++--
 R-package/man/getinfo.Rd | 2 +-
 R-package/man/nrow-xgb.DMatrix-method.Rd | 3 +-
 R-package/man/predict-xgb.Booster-method.Rd | 12 +++---
 .../man/predict-xgb.Booster.handle-method.Rd | 2 +-
 R-package/man/setinfo.Rd | 2 +-
 R-package/man/slice.Rd | 2 +-
 R-package/man/xgb.DMatrix.Rd | 4 +-
 R-package/man/xgb.DMatrix.save.Rd | 2 +-
 R-package/man/xgb.cv.Rd | 20 ++++-----
 R-package/man/xgb.dump.Rd | 18 ++++----
 R-package/man/xgb.importance.Rd | 13 +++---
 R-package/man/xgb.load.Rd | 4 +-
 R-package/man/xgb.model.dt.tree.Rd | 9 ++--
 R-package/man/xgb.plot.importance.Rd | 9 ++--
 R-package/man/xgb.plot.tree.Rd | 19 ++++----
 R-package/man/xgb.save.Rd | 4 +-
 R-package/man/xgb.save.raw.Rd | 4 +-
 R-package/man/xgb.train.Rd | 43 ++++++++++---------
 R-package/man/xgboost.Rd | 19 ++++----
 36 files changed, 123 insertions(+), 126 deletions(-)

diff --git a/R-package/DESCRIPTION b/R-package/DESCRIPTION
index 59728f3c2..b4201e793 100644
--- a/R-package/DESCRIPTION
+++ b/R-package/DESCRIPTION
@@ -3,16 +3,16 @@ Type: Package
Title: Extreme Gradient Boosting
Version: 0.4-2
Date: 2015-08-01
-Author: Tianqi Chen , Tong He , Michael Benesty
+Author: Tianqi Chen , Tong He ,
+ Michael Benesty
Maintainer: Tong He
-Description: Extreme Gradient Boosting, which is an
- efficient implementation of gradient boosting framework.
- This package is its R interface. The package includes efficient
- linear model solver and tree learning algorithms. The package can automatically
- do parallel computation on a single machine which could be more than 10 times faster
- than existing gradient boosting packages. It supports various
- objective functions, including regression, classification and ranking. The
- package is made to be extensible, so that users are also allowed to define
+Description: Extreme Gradient Boosting, which is an efficient implementation
+ of gradient boosting framework. This package is its R interface. The package
+ includes efficient linear model solver and tree learning algorithms. The package
+ can automatically do parallel computation on a single machine which could be
+ more than 10 times faster than existing gradient boosting packages. It supports
+ various objective functions, including regression, classification and ranking.
+ The package is made to be extensible, so that users are also allowed to define
 their own objectives easily.
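A quick way to confirm from an R session that installed libraries already satisfy the raised version floors declared in the DESCRIPTION hunks just below (DiagrammeR >= 0.8.1, data.table >= 1.9.6): a minimal sketch using base R's packageVersion(), illustrative rather than part of the patch.

# Verify the new DESCRIPTION version floors are met before building the package.
stopifnot(
  packageVersion("DiagrammeR") >= "0.8.1",
  packageVersion("data.table") >= "1.9.6"
)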
License: Apache License (== 2.0) | file LICENSE URL: https://github.com/dmlc/xgboost @@ -21,7 +21,7 @@ VignetteBuilder: knitr Suggests: knitr, ggplot2 (>= 1.0.0), - DiagrammeR (>= 0.6), + DiagrammeR (>= 0.8.1), Ckmeans.1d.dp (>= 3.3.1), vcd (>= 1.3), testthat @@ -30,6 +30,7 @@ Depends: Imports: Matrix (>= 1.1-0), methods, - data.table (>= 1.9.4), + data.table (>= 1.9.6), magrittr (>= 1.5), stringr (>= 0.6.2) +RoxygenNote: 5.0.0 diff --git a/R-package/NAMESPACE b/R-package/NAMESPACE index a4f07799a..f3a7390b7 100644 --- a/R-package/NAMESPACE +++ b/R-package/NAMESPACE @@ -1,4 +1,4 @@ -# Generated by roxygen2 (4.1.1): do not edit by hand +# Generated by roxygen2: do not edit by hand export(getinfo) export(setinfo) @@ -21,6 +21,10 @@ exportMethods(predict) import(methods) importClassesFrom(Matrix,dgCMatrix) importClassesFrom(Matrix,dgeMatrix) +importFrom(DiagrammeR,create_edges) +importFrom(DiagrammeR,create_graph) +importFrom(DiagrammeR,create_nodes) +importFrom(DiagrammeR,render_graph) importFrom(Matrix,cBind) importFrom(Matrix,colSums) importFrom(Matrix,sparseVector) diff --git a/R-package/R/getinfo.xgb.DMatrix.R b/R-package/R/getinfo.xgb.DMatrix.R index dc734bce1..3000a1e7d 100644 --- a/R-package/R/getinfo.xgb.DMatrix.R +++ b/R-package/R/getinfo.xgb.DMatrix.R @@ -23,7 +23,6 @@ setClass('xgb.DMatrix') #' stopifnot(all(labels2 == 1-labels)) #' @rdname getinfo #' @export -#' getinfo <- function(object, ...){ UseMethod("getinfo") } diff --git a/R-package/R/predict.xgb.Booster.R b/R-package/R/predict.xgb.Booster.R index 432581e76..abdb94e75 100644 --- a/R-package/R/predict.xgb.Booster.R +++ b/R-package/R/predict.xgb.Booster.R @@ -29,7 +29,6 @@ setClass("xgb.Booster", #' eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") #' pred <- predict(bst, test$data) #' @export -#' setMethod("predict", signature = "xgb.Booster", definition = function(object, newdata, missing = NA, outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE) { diff --git a/R-package/R/setinfo.xgb.DMatrix.R b/R-package/R/setinfo.xgb.DMatrix.R index 4bee161b7..427de08d4 100644 --- a/R-package/R/setinfo.xgb.DMatrix.R +++ b/R-package/R/setinfo.xgb.DMatrix.R @@ -21,7 +21,6 @@ #' stopifnot(all(labels2 == 1-labels)) #' @rdname setinfo #' @export -#' setinfo <- function(object, ...){ UseMethod("setinfo") } diff --git a/R-package/R/slice.xgb.DMatrix.R b/R-package/R/slice.xgb.DMatrix.R index 3b025e1dd..4626c2b4d 100644 --- a/R-package/R/slice.xgb.DMatrix.R +++ b/R-package/R/slice.xgb.DMatrix.R @@ -13,7 +13,6 @@ setClass('xgb.DMatrix') #' dsub <- slice(dtrain, 1:3) #' @rdname slice #' @export -#' slice <- function(object, ...){ UseMethod("slice") } diff --git a/R-package/R/xgb.DMatrix.R b/R-package/R/xgb.DMatrix.R index 20a3276c0..c34c65d95 100644 --- a/R-package/R/xgb.DMatrix.R +++ b/R-package/R/xgb.DMatrix.R @@ -17,7 +17,6 @@ #' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data') #' dtrain <- xgb.DMatrix('xgb.DMatrix.data') #' @export -#' xgb.DMatrix <- function(data, info = list(), missing = NA, ...) 
{ if (typeof(data) == "character") { handle <- .Call("XGDMatrixCreateFromFile_R", data, as.integer(FALSE), diff --git a/R-package/R/xgb.DMatrix.save.R b/R-package/R/xgb.DMatrix.save.R index 7a9ac611d..63a0be691 100644 --- a/R-package/R/xgb.DMatrix.save.R +++ b/R-package/R/xgb.DMatrix.save.R @@ -12,7 +12,6 @@ #' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data') #' dtrain <- xgb.DMatrix('xgb.DMatrix.data') #' @export -#' xgb.DMatrix.save <- function(DMatrix, fname) { if (typeof(fname) != "character") { stop("xgb.save: fname must be character") diff --git a/R-package/R/xgb.cv.R b/R-package/R/xgb.cv.R index 5f964c4f8..89edbeb63 100644 --- a/R-package/R/xgb.cv.R +++ b/R-package/R/xgb.cv.R @@ -90,7 +90,6 @@ #' max.depth =3, eta = 1, objective = "binary:logistic") #' print(history) #' @export -#' xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA, prediction = FALSE, showsd = TRUE, metrics=list(), obj = NULL, feval = NULL, stratified = TRUE, folds = NULL, verbose = T, print.every.n=1L, diff --git a/R-package/R/xgb.dump.R b/R-package/R/xgb.dump.R index 856ec0888..b39359abd 100644 --- a/R-package/R/xgb.dump.R +++ b/R-package/R/xgb.dump.R @@ -36,7 +36,6 @@ #' # print the model without saving it to a file #' print(xgb.dump(bst)) #' @export -#' xgb.dump <- function(model = NULL, fname = NULL, fmap = "", with.stats=FALSE) { if (class(model) != "xgb.Booster") { stop("model: argument must be type xgb.Booster") diff --git a/R-package/R/xgb.load.R b/R-package/R/xgb.load.R index 2a2598dd8..03d6a4842 100644 --- a/R-package/R/xgb.load.R +++ b/R-package/R/xgb.load.R @@ -15,7 +15,6 @@ #' bst <- xgb.load('xgb.model') #' pred <- predict(bst, test$data) #' @export -#' xgb.load <- function(modelfile) { if (is.null(modelfile)) stop("xgb.load: modelfile cannot be NULL") diff --git a/R-package/R/xgb.save.R b/R-package/R/xgb.save.R index ad3cc8b12..7d595ddc6 100644 --- a/R-package/R/xgb.save.R +++ b/R-package/R/xgb.save.R @@ -16,7 +16,6 @@ #' bst <- xgb.load('xgb.model') #' pred <- predict(bst, test$data) #' @export -#' xgb.save <- function(model, fname) { if (typeof(fname) != "character") { stop("xgb.save: fname must be character") diff --git a/R-package/R/xgb.save.raw.R b/R-package/R/xgb.save.raw.R index e885e6e7e..e61303add 100644 --- a/R-package/R/xgb.save.raw.R +++ b/R-package/R/xgb.save.raw.R @@ -16,7 +16,6 @@ #' bst <- xgb.load(raw) #' pred <- predict(bst, test$data) #' @export -#' xgb.save.raw <- function(model) { if (class(model) == "xgb.Booster"){ model <- model$handle diff --git a/R-package/R/xgb.train.R b/R-package/R/xgb.train.R index 07bf74589..ffc94e34f 100644 --- a/R-package/R/xgb.train.R +++ b/R-package/R/xgb.train.R @@ -120,7 +120,6 @@ #' param <- list(max.depth = 2, eta = 1, silent = 1, objective=logregobj,eval_metric=evalerror) #' bst <- xgb.train(param, dtrain, nthread = 2, nround = 2, watchlist) #' @export -#' xgb.train <- function(params=list(), data, nrounds, watchlist = list(), obj = NULL, feval = NULL, verbose = 1, print.every.n=1L, early.stop.round = NULL, maximize = NULL, diff --git a/R-package/R/xgboost.R b/R-package/R/xgboost.R index 122d2f492..92637bb43 100644 --- a/R-package/R/xgboost.R +++ b/R-package/R/xgboost.R @@ -58,7 +58,6 @@ #' pred <- predict(bst, test$data) #' #' @export -#' xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL, params = list(), nrounds, verbose = 1, print.every.n = 1L, early.stop.round = NULL, diff --git a/R-package/man/agaricus.test.Rd b/R-package/man/agaricus.test.Rd index c54e30ba3..52ff08f86 100644 --- 
a/R-package/man/agaricus.test.Rd +++ b/R-package/man/agaricus.test.Rd @@ -1,10 +1,10 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xgboost.R \docType{data} \name{agaricus.test} \alias{agaricus.test} \title{Test part from Mushroom Data Set} -\format{A list containing a label vector, and a dgCMatrix object with 1611 +\format{A list containing a label vector, and a dgCMatrix object with 1611 rows and 126 variables} \usage{ data(agaricus.test) @@ -24,8 +24,8 @@ This data set includes the following fields: \references{ https://archive.ics.uci.edu/ml/datasets/Mushroom -Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository -[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, +Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository +[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science. } \keyword{datasets} diff --git a/R-package/man/agaricus.train.Rd b/R-package/man/agaricus.train.Rd index 955257148..e27d3ac25 100644 --- a/R-package/man/agaricus.train.Rd +++ b/R-package/man/agaricus.train.Rd @@ -1,10 +1,10 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xgboost.R \docType{data} \name{agaricus.train} \alias{agaricus.train} \title{Training part from Mushroom Data Set} -\format{A list containing a label vector, and a dgCMatrix object with 6513 +\format{A list containing a label vector, and a dgCMatrix object with 6513 rows and 127 variables} \usage{ data(agaricus.train) @@ -24,8 +24,8 @@ This data set includes the following fields: \references{ https://archive.ics.uci.edu/ml/datasets/Mushroom -Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository -[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, +Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository +[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science. 
} \keyword{datasets} diff --git a/R-package/man/getinfo.Rd b/R-package/man/getinfo.Rd index 87c507566..f8b4f6b99 100644 --- a/R-package/man/getinfo.Rd +++ b/R-package/man/getinfo.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getinfo.xgb.DMatrix.R \docType{methods} \name{getinfo} diff --git a/R-package/man/nrow-xgb.DMatrix-method.Rd b/R-package/man/nrow-xgb.DMatrix-method.Rd index f86709afd..1fd52b9c1 100644 --- a/R-package/man/nrow-xgb.DMatrix-method.Rd +++ b/R-package/man/nrow-xgb.DMatrix-method.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/nrow.xgb.DMatrix.R \docType{methods} \name{nrow,xgb.DMatrix-method} @@ -18,5 +18,6 @@ data(agaricus.train, package='xgboost') train <- agaricus.train dtrain <- xgb.DMatrix(train$data, label=train$label) stopifnot(nrow(dtrain) == nrow(train$data)) + } diff --git a/R-package/man/predict-xgb.Booster-method.Rd b/R-package/man/predict-xgb.Booster-method.Rd index 682df1f4b..13f37802e 100644 --- a/R-package/man/predict-xgb.Booster-method.Rd +++ b/R-package/man/predict-xgb.Booster-method.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.xgb.Booster.R \docType{methods} \name{predict,xgb.Booster-method} @@ -11,19 +11,19 @@ \arguments{ \item{object}{Object of class "xgb.Boost"} -\item{newdata}{takes \code{matrix}, \code{dgCMatrix}, local data file or +\item{newdata}{takes \code{matrix}, \code{dgCMatrix}, local data file or \code{xgb.DMatrix}.} -\item{missing}{Missing is only used when input is dense matrix, pick a float +\item{missing}{Missing is only used when input is dense matrix, pick a float value that represents missing value. Sometime a data use 0 or other extreme value to represents missing values.} \item{outputmargin}{whether the prediction should be shown in the original -value of sum of functions, when outputmargin=TRUE, the prediction is +value of sum of functions, when outputmargin=TRUE, the prediction is untransformed margin value. In logistic regression, outputmargin=T will output value before logistic transformation.} \item{ntreelimit}{limit number of trees used in prediction, this parameter is -only valid for gbtree, but not for gblinear. set it to be value bigger +only valid for gbtree, but not for gblinear. set it to be value bigger than 0. It will use all trees by default.} \item{predleaf}{whether predict leaf index instead. 
If set to TRUE, the output will be a matrix object.}
@@ -36,7 +36,7 @@
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
-bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
+bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
eta = 1, nthread = 2, nround = 2,objective = "binary:logistic")
pred <- predict(bst, test$data)
}
diff --git a/R-package/man/predict-xgb.Booster.handle-method.Rd b/R-package/man/predict-xgb.Booster.handle-method.Rd
index 7eb237a94..34454e555 100644
--- a/R-package/man/predict-xgb.Booster.handle-method.Rd
+++ b/R-package/man/predict-xgb.Booster.handle-method.Rd
@@ -1,4 +1,4 @@
-% Generated by roxygen2 (4.1.1): do not edit by hand
+% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.xgb.Booster.handle.R
\docType{methods}
\name{predict,xgb.Booster.handle-method}
diff --git a/R-package/man/setinfo.Rd b/R-package/man/setinfo.Rd
index edf5284bd..cb939721e 100644
--- a/R-package/man/setinfo.Rd
+++ b/R-package/man/setinfo.Rd
@@ -1,4 +1,4 @@
-% Generated by roxygen2 (4.1.1): do not edit by hand
+% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setinfo.xgb.DMatrix.R
\docType{methods}
\name{setinfo}
diff --git a/R-package/man/slice.Rd b/R-package/man/slice.Rd
index 20a78a383..b17722115 100644
--- a/R-package/man/slice.Rd
+++ b/R-package/man/slice.Rd
@@ -1,4 +1,4 @@
-% Generated by roxygen2 (4.1.1): do not edit by hand
+% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slice.xgb.DMatrix.R
\docType{methods}
\name{slice}
diff --git a/R-package/man/xgb.DMatrix.Rd b/R-package/man/xgb.DMatrix.Rd
index 9432ce319..2e892cc6d 100644
--- a/R-package/man/xgb.DMatrix.Rd
+++ b/R-package/man/xgb.DMatrix.Rd
@@ -1,4 +1,4 @@
-% Generated by roxygen2 (4.1.1): do not edit by hand
+% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{xgb.DMatrix}
\alias{xgb.DMatrix}
@@ -7,7 +7,7 @@
xgb.DMatrix(data, info = list(), missing = NA, ...)
}
\arguments{
-\item{data}{a \code{matrix} object, a \code{dgCMatrix} object or a character
+\item{data}{a \code{matrix} object, a \code{dgCMatrix} object or a character
indicating the data file.}
\item{info}{a list of information of the xgb.DMatrix object}
diff --git a/R-package/man/xgb.DMatrix.save.Rd b/R-package/man/xgb.DMatrix.save.Rd
index 3ba36f55a..78348c3fa 100644
--- a/R-package/man/xgb.DMatrix.save.Rd
+++ b/R-package/man/xgb.DMatrix.save.Rd
@@ -1,4 +1,4 @@
-% Generated by roxygen2 (4.1.1): do not edit by hand
+% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.save.R
\name{xgb.DMatrix.save}
\alias{xgb.DMatrix.save}
diff --git a/R-package/man/xgb.cv.Rd b/R-package/man/xgb.cv.Rd
index f918a003c..f3a1fcfd1 100644
--- a/R-package/man/xgb.cv.Rd
+++ b/R-package/man/xgb.cv.Rd
@@ -1,4 +1,4 @@
-% Generated by roxygen2 (4.1.1): do not edit by hand
+% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.cv.R
\name{xgb.cv}
\alias{xgb.cv}
@@ -40,7 +40,7 @@ value that represents missing value. Sometime a data use 0 or other extreme valu
\item{showsd}{\code{boolean}, whether show standard deviation of cross validation}
-\item{metrics,}{list of evaluation metrics to be used in corss validation,
+\item{metrics, }{list of evaluation metrics to be used in cross validation,
when it is not specified, the evaluation metric is chosen according to objective function.
Possible options are: \itemize{
@@ -51,11 +51,11 @@ value that represents missing value. Sometime a data use 0 or other extreme valu
\item \code{merror} Exact matching error, used to evaluate multi-class classification
}}
-\item{obj}{customized objective function. Returns gradient and second order
+\item{obj}{customized objective function. Returns gradient and second order
gradient with given prediction and dtrain.}
-\item{feval}{custimized evaluation function. Returns
-\code{list(metric='metric-name', value='metric-value')} with given
+\item{feval}{customized evaluation function. Returns
+\code{list(metric='metric-name', value='metric-value')} with given
prediction and dtrain.}
\item{stratified}{\code{boolean} whether sampling of folds should be stratified by the values of labels in \code{data}}
@@ -67,12 +67,12 @@ If folds are supplied, the nfold and stratified parameters would be ignored.}
\item{print.every.n}{Print every N progress messages when \code{verbose>0}. Default is 1 which means all messages are printed.}
-\item{early.stop.round}{If \code{NULL}, the early stopping function is not triggered.
-If set to an integer \code{k}, training with a validation set will stop if the performance
+\item{early.stop.round}{If \code{NULL}, the early stopping function is not triggered.
+If set to an integer \code{k}, training with a validation set will stop if the performance
keeps getting worse consecutively for \code{k} rounds.}
\item{maximize}{If \code{feval} and \code{early.stop.round} are set, then \code{maximize} must be set as well.
- \code{maximize=TRUE} means the larger the evaluation score the better.}
+\code{maximize=TRUE} means the larger the evaluation score the better.}
\item{...}{other parameters to pass to \code{params}.}
}
@@ -89,9 +89,9 @@ If \code{prediction = FALSE}, just a \code{data.table} with each mean and standa
The cross valudation function of xgboost
}
\details{
-The original sample is randomly partitioned into \code{nfold} equal size subsamples.
+The original sample is randomly partitioned into \code{nfold} equal size subsamples.
-Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
+Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
diff --git a/R-package/man/xgb.dump.Rd b/R-package/man/xgb.dump.Rd
index eaf1ca521..cafa8ac14 100644
--- a/R-package/man/xgb.dump.Rd
+++ b/R-package/man/xgb.dump.Rd
@@ -1,4 +1,4 @@
-% Generated by roxygen2 (4.1.1): do not edit by hand
+% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.dump.R
\name{xgb.dump}
\alias{xgb.dump}
@@ -11,17 +11,17 @@
xgb.dump(model = NULL, fname = NULL, fmap = "", with.stats = FALSE)
\item{fname}{the name of the text file where to save the model text dump. If not provided or set to \code{NULL} the function will return the model as a \code{character} vector.}
-\item{fmap}{feature map file representing the type of feature.
-Detailed description could be found at
+\item{fmap}{feature map file representing the type of feature.
+Detailed description could be found at
\url{https://github.com/dmlc/xgboost/wiki/Binary-Classification#dump-model}.
See demo/ for walkthrough example in R, and -\url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt} +\url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt} for example Format.} -\item{with.stats}{whether dump statistics of splits - When this option is on, the model dump comes with two additional statistics: - gain is the approximate loss function gain we get in each split; - cover is the sum of second order gradient in each node.} +\item{with.stats}{whether dump statistics of splits +When this option is on, the model dump comes with two additional statistics: +gain is the approximate loss function gain we get in each split; +cover is the sum of second order gradient in each node.} } \value{ if fname is not provided or set to \code{NULL} the function will return the model as a \code{character} vector. Otherwise it will return \code{TRUE}. @@ -34,7 +34,7 @@ data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, +bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") # save the model in file 'xgb.model.dump' xgb.dump(bst, 'xgb.model.dump', with.stats = TRUE) diff --git a/R-package/man/xgb.importance.Rd b/R-package/man/xgb.importance.Rd index 11740e4ac..a1ce89d4f 100644 --- a/R-package/man/xgb.importance.Rd +++ b/R-package/man/xgb.importance.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xgb.importance.R \name{xgb.importance} \alias{xgb.importance} @@ -24,7 +24,7 @@ xgb.importance(feature_names = NULL, filename_dump = NULL, model = NULL, A \code{data.table} of the features used in the model with their average gain (and their weight for boosted tree model) in the model. } \description{ -Read a xgboost model text dump. +Read a xgboost model text dump. Can be tree or linear model (text dump of linear model are only supported in dev version of \code{Xgboost} for now). } \details{ @@ -32,7 +32,7 @@ This is the function to understand the model trained (and through your model, yo Results are returned for both linear and tree models. -\code{data.table} is returned by the function. +\code{data.table} is returned by the function. There are 3 columns : \itemize{ \item \code{Features} name of the features as provided in \code{feature_names} or already present in the model dump. @@ -53,12 +53,12 @@ If you need to remember one thing only: until you want to leave us early, don't \examples{ data(agaricus.train, package='xgboost') -# Both dataset are list with two items, a sparse matrix and labels -# (labels = outcome column which will be learned). +# Both dataset are list with two items, a sparse matrix and labels +# (labels = outcome column which will be learned). # Each column of the sparse Matrix is a feature in one hot encoding format. train <- agaricus.train -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, +bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") # train$data@Dimnames[[2]] represents the column names of the sparse matrix. 
@@ -66,5 +66,6 @@ xgb.importance(train$data@Dimnames[[2]], model = bst) # Same thing with co-occurence computation this time xgb.importance(train$data@Dimnames[[2]], model = bst, data = train$data, label = train$label) + } diff --git a/R-package/man/xgb.load.Rd b/R-package/man/xgb.load.Rd index 1331ff249..92576ad95 100644 --- a/R-package/man/xgb.load.Rd +++ b/R-package/man/xgb.load.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xgb.load.R \name{xgb.load} \alias{xgb.load} @@ -17,7 +17,7 @@ data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, +bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") xgb.save(bst, 'xgb.model') bst <- xgb.load('xgb.model') diff --git a/R-package/man/xgb.model.dt.tree.Rd b/R-package/man/xgb.model.dt.tree.Rd index c53ed057f..9a3efc39f 100644 --- a/R-package/man/xgb.model.dt.tree.Rd +++ b/R-package/man/xgb.model.dt.tree.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xgb.model.dt.tree.R \name{xgb.model.dt.tree} \alias{xgb.model.dt.tree} @@ -45,15 +45,16 @@ The content of the \code{data.table} is organised that way: \examples{ data(agaricus.train, package='xgboost') -#Both dataset are list with two items, a sparse matrix and labels -#(labels = outcome column which will be learned). +#Both dataset are list with two items, a sparse matrix and labels +#(labels = outcome column which will be learned). #Each column of the sparse Matrix is a feature in one hot encoding format. train <- agaricus.train -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, +bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") #agaricus.test$data@Dimnames[[2]] represents the column names of the sparse matrix. xgb.model.dt.tree(agaricus.train$data@Dimnames[[2]], model = bst) + } diff --git a/R-package/man/xgb.plot.importance.Rd b/R-package/man/xgb.plot.importance.Rd index 4147278b9..de70624cb 100644 --- a/R-package/man/xgb.plot.importance.Rd +++ b/R-package/man/xgb.plot.importance.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xgb.plot.importance.R \name{xgb.plot.importance} \alias{xgb.plot.importance} @@ -25,16 +25,17 @@ In particular you may want to override the title of the graph. To do so, add \co \examples{ data(agaricus.train, package='xgboost') -#Both dataset are list with two items, a sparse matrix and labels -#(labels = outcome column which will be learned). +#Both dataset are list with two items, a sparse matrix and labels +#(labels = outcome column which will be learned). #Each column of the sparse Matrix is a feature in one hot encoding format. train <- agaricus.train -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, +bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") #train$data@Dimnames[[2]] represents the column names of the sparse matrix. 
importance_matrix <- xgb.importance(train$data@Dimnames[[2]], model = bst) xgb.plot.importance(importance_matrix) + } diff --git a/R-package/man/xgb.plot.tree.Rd b/R-package/man/xgb.plot.tree.Rd index 4501d87ce..f34e75bf9 100644 --- a/R-package/man/xgb.plot.tree.Rd +++ b/R-package/man/xgb.plot.tree.Rd @@ -1,11 +1,11 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xgb.plot.tree.R \name{xgb.plot.tree} \alias{xgb.plot.tree} \title{Plot a boosted tree model} \usage{ xgb.plot.tree(feature_names = NULL, filename_dump = NULL, model = NULL, - n_first_tree = NULL, CSSstyle = NULL, width = NULL, height = NULL) + n_first_tree = NULL, width = NULL, height = NULL) } \arguments{ \item{feature_names}{names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.} @@ -16,8 +16,6 @@ xgb.plot.tree(feature_names = NULL, filename_dump = NULL, model = NULL, \item{n_first_tree}{limit the plot to the n first trees. If \code{NULL}, all trees of the model are plotted. Performance can be low for huge models.} -\item{CSSstyle}{a \code{character} vector storing a css style to customize the appearance of nodes. Look at the \href{https://github.com/knsv/mermaid/wiki}{Mermaid wiki} for more information.} - \item{width}{the width of the diagram in pixels.} \item{height}{the height of the diagram in pixels.} @@ -26,7 +24,7 @@ xgb.plot.tree(feature_names = NULL, filename_dump = NULL, model = NULL, A \code{DiagrammeR} of the model. } \description{ -Read a tree model text dump. +Read a tree model text dump. Plotting only works for boosted tree model (not linear model). } \details{ @@ -36,23 +34,24 @@ The content of each node is organised that way: \item \code{feature} value ; \item \code{cover}: the sum of second order gradient of training data classified to the leaf, if it is square loss, this simply corresponds to the number of instances in that branch. Deeper in the tree a node is, lower this metric will be ; \item \code{gain}: metric the importance of the node in the model. -} +} Each branch finishes with a leaf. For each leaf, only the \code{cover} is indicated. -It uses \href{https://github.com/knsv/mermaid/}{Mermaid} library for that purpose. +It uses \href{http://www.graphviz.org/}{GraphViz} library for that purpose. } \examples{ data(agaricus.train, package='xgboost') -#Both dataset are list with two items, a sparse matrix and labels -#(labels = outcome column which will be learned). +#Both dataset are list with two items, a sparse matrix and labels +#(labels = outcome column which will be learned). #Each column of the sparse Matrix is a feature in one hot encoding format. train <- agaricus.train -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, +bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") #agaricus.test$data@Dimnames[[2]] represents the column names of the sparse matrix. 
xgb.plot.tree(agaricus.train$data@Dimnames[[2]], model = bst) + } diff --git a/R-package/man/xgb.save.Rd b/R-package/man/xgb.save.Rd index eca097fac..db335105c 100644 --- a/R-package/man/xgb.save.Rd +++ b/R-package/man/xgb.save.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xgb.save.R \name{xgb.save} \alias{xgb.save} @@ -19,7 +19,7 @@ data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, +bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") xgb.save(bst, 'xgb.model') bst <- xgb.load('xgb.model') diff --git a/R-package/man/xgb.save.raw.Rd b/R-package/man/xgb.save.raw.Rd index 79c356c0f..1e9f4a4db 100644 --- a/R-package/man/xgb.save.raw.Rd +++ b/R-package/man/xgb.save.raw.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xgb.save.raw.R \name{xgb.save.raw} \alias{xgb.save.raw} @@ -18,7 +18,7 @@ data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, +bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2,objective = "binary:logistic") raw <- xgb.save.raw(bst) bst <- xgb.load(raw) diff --git a/R-package/man/xgb.train.Rd b/R-package/man/xgb.train.Rd index 15a0b0ba7..50bfb46d0 100644 --- a/R-package/man/xgb.train.Rd +++ b/R-package/man/xgb.train.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xgb.train.R \name{xgb.train} \alias{xgb.train} @@ -10,7 +10,7 @@ xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL, save_name = "xgboost.model", ...) } \arguments{ -\item{params}{the list of parameters. +\item{params}{the list of parameters. 1. General Parameters @@ -18,30 +18,30 @@ xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL, \item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree} \item \code{silent} 0 means printing running messages, 1 means silent mode. Default: 0 } - + 2. Booster Parameters 2.1. Parameter for Tree Booster \itemize{ \item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3 - \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be. + \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be. \item \code{max_depth} maximum depth of a tree. Default: 6 \item \code{min_child_weight} minimum sum of instance weight(hessian) needed in a child. 
If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1 - \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nround}. Default: 1 + \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nround}. Default: 1 \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1 \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1 } 2.2. Parameter for Linear Booster - + \itemize{ \item \code{lambda} L2 regularization term on weights. Default: 0 \item \code{lambda_bias} L2 regularization term on bias. Default: 0 \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0 } -3. Task Parameters +3. Task Parameters \itemize{ \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below: @@ -51,7 +51,7 @@ xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL, \item \code{binary:logistic} logistic regression for binary classification. Output probability. \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation. \item \code{num_class} set the number of classes. To use only with multiclass objectives. - \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{tonum_class}. + \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class}. \item \code{multi:softprob} same as softmax, but output a vector of ndata * nclass, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class. \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss. } @@ -64,25 +64,25 @@ xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL, \item{nrounds}{the max number of iterations} \item{watchlist}{what information should be printed when \code{verbose=1} or - \code{verbose=2}. Watchlist is used to specify validation set monitoring - during training. For example user can specify - watchlist=list(validation1=mat1, validation2=mat2) to watch - the performance of each round's model on mat1 and mat2} +\code{verbose=2}. Watchlist is used to specify validation set monitoring +during training. 
For example user can specify
+ watchlist=list(validation1=mat1, validation2=mat2) to watch
+ the performance of each round's model on mat1 and mat2}
-\item{obj}{customized objective function. Returns gradient and second order
+\item{obj}{customized objective function. Returns gradient and second order
gradient with given prediction and dtrain,}
-\item{feval}{custimized evaluation function. Returns
-\code{list(metric='metric-name', value='metric-value')} with given
+\item{feval}{customized evaluation function. Returns
+\code{list(metric='metric-name', value='metric-value')} with given
prediction and dtrain,}
-\item{verbose}{If 0, xgboost will stay silent. If 1, xgboost will print
+\item{verbose}{If 0, xgboost will stay silent. If 1, xgboost will print
information of performance. If 2, xgboost will print information of both}
\item{print.every.n}{Print every N progress messages when \code{verbose>0}. Default is 1 which means all messages are printed.}
-\item{early.stop.round}{If \code{NULL}, the early stopping function is not triggered.
-If set to an integer \code{k}, training with a validation set will stop if the performance
+\item{early.stop.round}{If \code{NULL}, the early stopping function is not triggered.
+If set to an integer \code{k}, training with a validation set will stop if the performance
keeps getting worse consecutively for \code{k} rounds.}
\item{maximize}{If \code{feval} and \code{early.stop.round} are set, then \code{maximize} must be set as well.
@@ -98,24 +98,25 @@ keeps getting worse consecutively for \code{k} rounds.}
An advanced interface for training xgboost model. Look at \code{\link{xgboost}} function for a simpler interface.
}
\details{
-This is the training function for \code{xgboost}.
+This is the training function for \code{xgboost}.
It supports advanced features such as \code{watchlist}, customized objective function (\code{feval}), therefore it is more flexible than \code{\link{xgboost}} function.
-Parallelization is automatically enabled if \code{OpenMP} is present.
+Parallelization is automatically enabled if \code{OpenMP} is present.
Number of threads can also be manually specified via \code{nthread} parameter.
\code{eval_metric} parameter (not listed above) is set automatically by Xgboost but can be overriden by parameter. Below is provided the list of different metric optimized by Xgboost to help you to understand how it works inside or to use them with the \code{watchlist} parameter.
\itemize{
 \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
 \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
+ \item \code{mlogloss} multiclass logloss. \url{https://www.kaggle.com/wiki/MultiClassLogLoss}
 \item \code{error} Binary classification error rate. It is calculated as \code{(wrong cases) / (all cases)}. For the predictions, the evaluation will regard the instances with prediction value larger than 0.5 as positive instances, and the others as negative instances.
 \item \code{merror} Multiclass classification error rate. It is calculated as \code{(wrong cases) / (all cases)}.
 \item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
 \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
}
-
+
Full list of parameters is available in the Wiki \url{https://github.com/dmlc/xgboost/wiki/Parameters}.
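The obj and feval arguments documented above are plain R functions of (preds, dtrain). As a sketch of what the logregobj and evalerror names used in the xgb.train example earlier in this patch could look like for a binary logistic task (the bodies below are assumptions modeled on the package demos, not taken from this patch):

# Customized objective: returns the first- and second-order gradients
# of the logistic loss with respect to the raw margin predictions.
logregobj <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  preds <- 1 / (1 + exp(-preds))  # sigmoid of the margin
  grad <- preds - labels
  hess <- preds * (1 - preds)
  list(grad = grad, hess = hess)
}

# Customized evaluation: returns list(metric='metric-name',
# value='metric-value'), as the feval documentation above specifies.
evalerror <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  err <- sum(labels != (preds > 0)) / length(labels)
  list(metric = "error", value = err)
}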
This function only accepts an \code{\link{xgb.DMatrix}} object as the input. diff --git a/R-package/man/xgboost.Rd b/R-package/man/xgboost.Rd index 79c33007e..e31e5da43 100644 --- a/R-package/man/xgboost.Rd +++ b/R-package/man/xgboost.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xgboost.R \name{xgboost} \alias{xgboost} @@ -10,13 +10,13 @@ xgboost(data = NULL, label = NULL, missing = NA, weight = NULL, save_name = "xgboost.model", ...) } \arguments{ -\item{data}{takes \code{matrix}, \code{dgCMatrix}, local data file or +\item{data}{takes \code{matrix}, \code{dgCMatrix}, local data file or \code{xgb.DMatrix}.} \item{label}{the response variable. User should not set this field, if data is local data file or \code{xgb.DMatrix}.} -\item{missing}{Missing is only used when input is dense matrix, pick a float +\item{missing}{Missing is only used when input is dense matrix, pick a float value that represents missing value. Sometimes a data use 0 or other extreme value to represents missing values.} \item{weight}{a vector indicating the weight for each row of the input.} @@ -34,21 +34,21 @@ Commonly used ones are: \item \code{max.depth} maximum depth of the tree \item \code{nthread} number of thread used in training, if not set, all threads are used } - + Look at \code{\link{xgb.train}} for a more complete list of parameters or \url{https://github.com/dmlc/xgboost/wiki/Parameters} for the full list. - + See also \code{demo/} for walkthrough example in R.} \item{nrounds}{the max number of iterations} -\item{verbose}{If 0, xgboost will stay silent. If 1, xgboost will print +\item{verbose}{If 0, xgboost will stay silent. If 1, xgboost will print information of performance. If 2, xgboost will print information of both performance and construction progress information} \item{print.every.n}{Print every N progress messages when \code{verbose>0}. Default is 1 which means all messages are printed.} -\item{early.stop.round}{If \code{NULL}, the early stopping function is not triggered. -If set to an integer \code{k}, training with a validation set will stop if the performance +\item{early.stop.round}{If \code{NULL}, the early stopping function is not triggered. +If set to an integer \code{k}, training with a validation set will stop if the performance keeps getting worse consecutively for \code{k} rounds.} \item{maximize}{If \code{feval} and \code{early.stop.round} are set, then \code{maximize} must be set as well. @@ -75,8 +75,9 @@ data(agaricus.train, package='xgboost') data(agaricus.test, package='xgboost') train <- agaricus.train test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max.depth = 2, +bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic") pred <- predict(bst, test$data) + }
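Two short sketches to illustrate the changes this patch revolves around, built from code already shown in the hunks above.

First, the @export fix deletes the dangling "#'" line that followed each @export tag; per the commit message, roxygen2 5.0 no longer handles the tag correctly when such a line trails it. Taking slice() from the patch as the example:

# Before: a dangling roxygen line follows @export (mis-parsed by roxygen2 5.0).
#' @rdname slice
#' @export
#'
slice <- function(object, ...){
  UseMethod("slice")
}

# After: @export is the last roxygen line of the block.
#' @rdname slice
#' @export
slice <- function(object, ...){
  UseMethod("slice")
}

Second, the new importFrom(DiagrammeR, ...) entries in NAMESPACE cover the graph-building calls that xgb.plot.tree relies on from DiagrammeR 0.8.1 onward. A minimal sketch of those four calls (argument names are assumptions based on that generation of the DiagrammeR API, not taken from this patch):

library(DiagrammeR)
# Build and render a toy two-node graph with the four imported functions.
nodes <- create_nodes(nodes = c("1", "2"), label = c("split", "leaf"))
edges <- create_edges(from = "1", to = "2")
graph <- create_graph(nodes_df = nodes, edges_df = edges)
render_graph(graph)  # renders via the GraphViz engine, matching the updated docs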