From 4a8612defcd3932ad25f3e6be03ba664799da4fb Mon Sep 17 00:00:00 2001 From: tqchen Date: Sat, 6 Sep 2014 10:19:19 -0700 Subject: [PATCH] add customize objective --- R-package/R/utils.R | 6 ++--- R-package/demo/custom_objective.R | 39 +++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 3 deletions(-) create mode 100644 R-package/demo/custom_objective.R diff --git a/R-package/R/utils.R b/R-package/R/utils.R index 2dddcc980..4ed6b14fe 100644 --- a/R-package/R/utils.R +++ b/R-package/R/utils.R @@ -133,9 +133,9 @@ xgb.iter.update <- function(booster, dtrain, iter, obj = NULL) { .Call("XGBoosterUpdateOneIter_R", booster, as.integer(iter), dtrain, PACKAGE = "xgboost") } else { - pred <- xgb.predict(bst, dtrain) + pred <- xgb.predict(booster, dtrain) gpair <- obj(pred, dtrain) - succ <- xgb.iter.boost(bst, dtrain, gpair) + succ <- xgb.iter.boost(booster, dtrain, gpair) } return(TRUE) } @@ -172,7 +172,7 @@ xgb.iter.eval <- function(booster, watchlist, iter, feval = NULL) { if (length(names(w)) == 0) { stop("xgb.eval: name tag must be presented for every elements in watchlist") } - ret <- feval(xgb.predict(bst, w[[1]]), w[[1]]) + ret <- feval(xgb.predict(booster, w[[1]]), w[[1]]) msg <- paste(msg, "\t", names(w), "-", ret$metric, ":", ret$value, sep="") } } diff --git a/R-package/demo/custom_objective.R b/R-package/demo/custom_objective.R new file mode 100644 index 000000000..017961876 --- /dev/null +++ b/R-package/demo/custom_objective.R @@ -0,0 +1,39 @@ +require(xgboost) +# load in the agaricus dataset +data(agaricus.train, package='xgboost') +data(agaricus.test, package='xgboost') +dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) +dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) + +# note: for customized objective function, we leave objective as default +# note: what we are getting is margin value in prediction +# you must know what you are doing +param <- list(max_depth=2,eta=1,silent=1) +watchlist <- list(eval 
= dtest, train = dtrain)
+num_round <- 2
+
+# user-defined objective function: given predictions, return gradient and second-order gradient
+# this is log-likelihood loss
+logregobj <- function(preds, dtrain) {
+ labels <- getinfo(dtrain, "label")
+ preds <- 1/(1 + exp(-preds))
+ grad <- preds - labels
+ hess <- preds * (1 - preds)
+ return(list(grad = grad, hess = hess))
+}
+
+# user-defined evaluation function: returns a pair (metric_name, result)
+# NOTE: when you use a customized loss function, the default prediction value is the margin
+# this may make the built-in evaluation metric not function properly
+# for example, when doing logistic loss, the prediction is the score before the logistic transformation
+# the built-in evaluation error assumes the input is after the logistic transformation
+# Keep this in mind when you use the customization, and you may need to write a customized evaluation function
+evalerror <- function(preds, dtrain) {
+ labels <- getinfo(dtrain, "label")
+ err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
+ return(list(metric = "error", value = err))
+}
+print ('start training with user customized objective')
+# training with a customized objective; we can also do step-by-step training
+# simply look at xgboost.py's implementation of train
+bst <- xgb.train(param, dtrain, num_round, watchlist, logregobj, evalerror)