Merge branch 'master' of ssh://github.com/tqchen/xgboost
commit ab6a3b1ee8
@ -66,7 +66,11 @@
#' prediction and dtrain,
#' @param verbose If 0, xgboost will stay silent. If 1, xgboost will print
#' information of performance. If 2, xgboost will print information of both
#' performance and construction progress information
#'
#' @param earlyStopRound If \code{NULL}, the early stopping function is not triggered.
#' If set to an integer \code{k}, training with a validation set will stop if the performance
#' keeps getting worse consecutively for \code{k} rounds.
#' @param maximize If \code{feval} and \code{earlyStopRound} are set, then \code{maximize} must be set as well.
#' \code{maximize=TRUE} means the larger the evaluation score the better.
#' @param ... other parameters to pass to \code{params}.
#'
#' @details
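
For illustration, a minimal sketch of a call exercising the new parameters (assuming the agaricus demo data shipped with the package; since eval_metric = 'error' is one of the built-in minimizing metrics, maximize is inferred and can be omitted):

require(xgboost)
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
bst <- xgb.train(list(objective = 'binary:logistic', eval_metric = 'error',
                      max.depth = 2, eta = 1),
                 dtrain, nrounds = 50, watchlist = list(eval = dtest),
                 earlyStopRound = 3)
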
@ -114,7 +118,8 @@
#' @export
#'
xgb.train <- function(params=list(), data, nrounds, watchlist = list(),
                      obj = NULL, feval = NULL, verbose = 1, ...) {
                      obj = NULL, feval = NULL, verbose = 1,
                      earlyStopRound = NULL, maximize = NULL, ...) {
  dtrain <- data
  if (typeof(params) != "list") {
    stop("xgb.train: first argument params must be list")
@ -133,6 +138,36 @@ xgb.train <- function(params=list(), data, nrounds, watchlist = list(),
  }
  params = append(params, list(...))

  # Early stopping
  if (!is.null(earlyStopRound)) {
    if (!is.null(feval) && is.null(maximize))
      stop('Please set maximize to indicate whether the evaluation metric should be maximized or not.')
    if (length(watchlist) == 0)
      stop('For early stopping you need at least one set in watchlist.')
    if (is.null(maximize) && is.null(params$eval_metric))
      stop('Please set maximize, or an eval_metric in params, so the direction of the evaluation is known.')
    if (is.null(maximize)) {
      # these built-in metrics are all lower-is-better; everything else is maximized
      if (params$eval_metric %in% c('rmse','logloss','error','merror','mlogloss')) {
        maximize = FALSE
      } else {
        maximize = TRUE
      }
    }

    if (maximize) {
      bestScore = -Inf
    } else {
      bestScore = Inf
    }
    bestInd = 0
    earlyStopflag = FALSE

    if (length(watchlist) > 1)
      warning('Only the first data set in watchlist is used for the early stopping process.')
  }


  handle <- xgb.Booster(params, append(watchlist, dtrain))
  bst <- xgb.handleToBooster(handle)
  for (i in 1:nrounds) {
@ -140,8 +175,27 @@ xgb.train <- function(params=list(), data, nrounds, watchlist = list(),
    if (length(watchlist) != 0) {
      msg <- xgb.iter.eval(bst$handle, watchlist, i - 1, feval)
      cat(paste(msg, "\n", sep=""))
      if (!is.null(earlyStopRound)) {
        # the evaluation message looks like "[round]\tname-metric:score ...";
        # take the first score reported
        score = as.numeric(strsplit(msg, ':|\\s+')[[1]][3])
        if ((maximize && score > bestScore) || (!maximize && score < bestScore)) {
          bestScore = score
          bestInd = i
        } else {
          if (i - bestInd >= earlyStopRound) {
            earlyStopflag = TRUE
            cat('Stopping. Best iteration:', bestInd, '\n')
            break
          }
        }
      }
    }
  }
  bst <- xgb.Booster.check(bst)
  if (!is.null(earlyStopRound)) {
    bst$bestScore = bestScore
    bst$bestInd = bestInd
  }
  return(bst)
}
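
The score extraction above assumes evaluation messages of the form "[round]<tab>name-metric:score". A standalone sketch of that parsing (the message text is illustrative, not produced by this code):

msg <- '[1]\teval-error:0.0421'                       # assumed message format
score <- as.numeric(strsplit(msg, ':|\\s+')[[1]][3])  # -> 0.0421
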
@ -30,6 +30,11 @@
#' performance and construction progress information
#' @param missing \code{missing} is used only when the input is a dense matrix; pick a float
#' value that represents missing values. Sometimes data uses 0 or another extreme value to represent missing values.
#' @param earlyStopRound If \code{NULL}, the early stopping function is not triggered.
#' If set to an integer \code{k}, training with a validation set will stop if the performance
#' keeps getting worse consecutively for \code{k} rounds.
#' @param maximize If \code{feval} and \code{earlyStopRound} are set, then \code{maximize} must be set as well.
#' \code{maximize=TRUE} means the larger the evaluation score the better.
#' @param ... other parameters to pass to \code{params}.
#'
#' @details
@ -51,7 +56,7 @@
#' @export
#'
xgboost <- function(data = NULL, label = NULL, missing = NULL, params = list(), nrounds,
                    verbose = 1, ...) {
                    verbose = 1, earlyStopRound = NULL, maximize = NULL, ...) {
  if (is.null(missing)) {
    dtrain <- xgb.get.DMatrix(data, label)
  } else {
@ -66,7 +71,8 @@ xgboost <- function(data = NULL, label = NULL, missing = NULL, params = list(),
    watchlist <- list()
  }

  bst <- xgb.train(params, dtrain, nrounds, watchlist, verbose=verbose)
  bst <- xgb.train(params, dtrain, nrounds, watchlist, verbose = verbose,
                   earlyStopRound = earlyStopRound, maximize = maximize)

  return(bst)
}
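
A minimal sketch of early stopping through the wrapper (agaricus demo data again; this assumes the wrapper puts the training set in the watchlist when verbose > 0, as the surrounding code outside this hunk does):

data(agaricus.train, package = 'xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               params = list(objective = 'binary:logistic', eval_metric = 'logloss',
                             max.depth = 2, eta = 1),
               nrounds = 30, verbose = 1, earlyStopRound = 3)
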
@ -6,3 +6,4 @@ generalized_linear_model Generalized Linear Model
cross_validation         Cross validation
create_sparse_matrix     Create Sparse Matrix
predict_leaf_indices     Predicting the corresponding leaves
early_Stopping           Early stopping in training
R-package/demo/early_Stopping.R (new file)
@ -0,0 +1,37 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# note: for a customized objective function, we leave objective as default
# note: what we get in prediction is the margin value
# you must know what you are doing
param <- list(max.depth = 2, eta = 1, nthread = 2, silent = 1)
watchlist <- list(eval = dtest)
num_round <- 20
# user-defined objective function: given prediction, return gradient and second order gradient
# this is log-likelihood loss
logregobj <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  preds <- 1/(1 + exp(-preds))
  grad <- preds - labels
  hess <- preds * (1 - preds)
  return(list(grad = grad, hess = hess))
}
# user-defined evaluation function: returns a pair (metric_name, result)
# NOTE: with a customized objective function, the prediction value is the margin,
# which may keep the built-in evaluation metrics from working properly.
# For example, with logistic loss the prediction is the score before the logistic transformation,
# while the built-in evaluation error assumes input after the logistic transformation.
# Keep this in mind when you customize, and write a customized evaluation function if needed.
evalerror <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
  return(list(metric = "error", value = err))
}
print('Start training with early stopping')
# training with a customized objective; we can also do step-by-step training
# simply look at the implementation of xgb.train for details
bst <- xgb.train(param, dtrain, num_round, watchlist, logregobj, evalerror, maximize = FALSE,
                 earlyStopRound = 3)
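
When training stops early, the booster returned by the demo carries the best round and score (the bestScore / bestInd fields set in xgb.train above):

cat('best score:', bst$bestScore, 'at round', bst$bestInd, '\n')
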
@ -5,7 +5,8 @@
\title{eXtreme Gradient Boosting Training}
\usage{
xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL,
  feval = NULL, verbose = 1, ...)
  feval = NULL, verbose = 1, earlyStopRound = NULL, maximize = NULL,
  ...)
}
\arguments{
\item{params}{the list of parameters.
@ -49,7 +50,7 @@ xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL,
\item \code{binary:logistic} logistic regression for binary classification. Output probability.
\item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
\item \code{num_class} set the number of classes. To use only with multiclass objectives.
\item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is a number and should be from 0 \code{tonum_class}
\item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class}.
\item \code{multi:softprob} same as softmax, but output a vector of ndata * nclass, which can be further reshaped to an ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
\item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
}
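
A sketch of the multiclass objectives documented above (using the standard iris data; labels are assumed to be integers from 0 to num_class - 1):

data(iris)
dtrain <- xgb.DMatrix(as.matrix(iris[, 1:4]), label = as.numeric(iris$Species) - 1)
bst <- xgb.train(list(objective = 'multi:softmax', num_class = 3, eta = 0.3),
                 dtrain, nrounds = 10)
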
@ -77,6 +78,13 @@ prediction and dtrain,}
\item{verbose}{If 0, xgboost will stay silent. If 1, xgboost will print
information of performance. If 2, xgboost will print information of both
performance and construction progress information}

\item{earlyStopRound}{If \code{NULL}, the early stopping function is not triggered.
If set to an integer \code{k}, training with a validation set will stop if the performance
keeps getting worse consecutively for \code{k} rounds.}

\item{maximize}{If \code{feval} and \code{earlyStopRound} are set, then \code{maximize} must be set as well.
\code{maximize=TRUE} means the larger the evaluation score the better.}

\item{...}{other parameters to pass to \code{params}.}
}
\description{
@ -98,7 +106,7 @@ Number of threads can also be manually specified via \code{nthread} parameter.
\item \code{error} Binary classification error rate. It is calculated as \code{(wrong cases) / (all cases)}. For the predictions, the evaluation will regard the instances with prediction value larger than 0.5 as positive instances, and the others as negative instances.
\item \code{merror} Multiclass classification error rate. It is calculated as \code{(wrong cases) / (all cases)}.
\item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve} for ranking evaluation.
\item \code{ndcg} Normalized Discounted Cumulative Gain. \url{http://en.wikipedia.org/wiki/NDCG}
\item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
}

Full list of parameters is available in the Wiki \url{https://github.com/dmlc/xgboost/wiki/Parameters}.

@ -5,7 +5,7 @@
\title{eXtreme Gradient Boosting (Tree) library}
\usage{
xgboost(data = NULL, label = NULL, missing = NULL, params = list(),
  nrounds, verbose = 1, ...)
  nrounds, verbose = 1, earlyStopRound = NULL, maximize = NULL, ...)
}
\arguments{
\item{data}{takes \code{matrix}, \code{dgCMatrix}, local data file or
@ -41,6 +41,13 @@ Commonly used ones are:
information of performance. If 2, xgboost will print information of both
performance and construction progress information}

\item{earlyStopRound}{If \code{NULL}, the early stopping function is not triggered.
If set to an integer \code{k}, training with a validation set will stop if the performance
keeps getting worse consecutively for \code{k} rounds.}

\item{maximize}{If \code{feval} and \code{earlyStopRound} are set, then \code{maximize} must be set as well.
\code{maximize=TRUE} means the larger the evaluation score the better.}

\item{...}{other parameters to pass to \code{params}.}
}
\description{