remove dependency on bst
commit 3c1ed847fb
parent 46f14b8c27
@@ -6,13 +6,13 @@ objective = binary:logistic
 
 # Tree Booster Parameters
 # step size shrinkage
-bst:eta = 1.0
+eta = 1.0
 # minimum loss reduction required to make a further partition
-bst:gamma = 1.0
+gamma = 1.0
 # minimum sum of instance weight(hessian) needed in a child
-bst:min_child_weight = 1
+min_child_weight = 1
 # maximum depth of a tree
-bst:max_depth = 3
+max_depth = 3
 
 # Task Parameters
 # the number of round to do boosting
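With the bst: prefix gone, the booster settings read the same in the config files and in the Python parameter map. A minimal sketch of the equivalent Python setup, assuming the demo's agaricus data file and the positional xgb.train call style used in the examples further down:

import xgboost as xgb

# equivalent of the config above, written with the new unprefixed names
dtrain = xgb.DMatrix('agaricus.txt.train')
param = {'objective': 'binary:logistic',
         'eta': 1.0,             # step size shrinkage
         'gamma': 1.0,           # minimum loss reduction for a further partition
         'min_child_weight': 1,  # minimum sum of instance weight (hessian) in a child
         'max_depth': 3}         # maximum depth of a tree
bst = xgb.train(param, dtrain, 2)  # 2 boosting rounds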
@@ -42,8 +42,8 @@ param = {}
 param['objective'] = 'binary:logitraw'
 # scale weight of positive examples
 param['scale_pos_weight'] = sum_wneg/sum_wpos
-param['bst:eta'] = 0.1
-param['bst:max_depth'] = 6
+param['eta'] = 0.1
+param['max_depth'] = 6
 param['eval_metric'] = 'auc'
 param['silent'] = 1
 param['nthread'] = 16
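The scale_pos_weight line above re-balances the loss when positive examples are rare. A sketch of how sum_wpos and sum_wneg could be derived, assuming unit instance weights (the variable names mirror the diff; the real demo may use per-instance weights instead of counts):

# sketch: derive positive/negative totals from the training labels
labels = dtrain.get_label()           # numpy array of 0/1 labels
sum_wpos = float((labels == 1).sum())
sum_wneg = float((labels == 0).sum())
param['scale_pos_weight'] = sum_wneg / sum_wpos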
@@ -25,8 +25,8 @@ param = {}
 # use softmax multi-class classification
 param['objective'] = 'multi:softmax'
 # scale weight of positive examples
-param['bst:eta'] = 0.1
-param['bst:max_depth'] = 6
+param['eta'] = 0.1
+param['max_depth'] = 6
 param['silent'] = 1
 param['nthread'] = 4
 param['num_class'] = 6
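With multi:softmax the model predicts a class index directly, which is why num_class must be set. A hedged usage sketch with the new parameter names (dtrain, dtest, and num_round assumed to be set up as in the surrounding demo):

param = {'objective': 'multi:softmax', 'eta': 0.1, 'max_depth': 6,
         'silent': 1, 'nthread': 4, 'num_class': 6}
bst = xgb.train(param, dtrain, num_round)
pred = bst.predict(dtest)  # one predicted class index per row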
@@ -5,13 +5,13 @@ objective="rank:pairwise"
 
 # Tree Booster Parameters
 # step size shrinkage
-bst:eta = 0.1
+eta = 0.1
 # minimum loss reduction required to make a further partition
-bst:gamma = 1.0
+gamma = 1.0
 # minimum sum of instance weight(hessian) needed in a child
-bst:min_child_weight = 0.1
+min_child_weight = 0.1
 # maximum depth of a tree
-bst:max_depth = 6
+max_depth = 6
 
 # Task parameters
 # the number of round to do boosting
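rank:pairwise additionally needs query grouping on the DMatrix so the booster knows which rows form one ranking list. A sketch assuming the DMatrix set_group interface, with hypothetical group sizes:

param = {'objective': 'rank:pairwise', 'eta': 0.1, 'gamma': 1.0,
         'min_child_weight': 0.1, 'max_depth': 6}
dtrain.set_group([10, 20, 5])  # hypothetical: rows per query, in row order
bst = xgb.train(param, dtrain, num_round)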
@@ -7,13 +7,13 @@ objective = reg:linear
 
 # Tree Booster Parameters
 # step size shrinkage
-bst:eta = 1.0
+eta = 1.0
 # minimum loss reduction required to make a further partition
-bst:gamma = 1.0
+gamma = 1.0
 # minimum sum of instance weight(hessian) needed in a child
-bst:min_child_weight = 1
+min_child_weight = 1
 # maximum depth of a tree
-bst:max_depth = 3
+max_depth = 3
 
 # Task parameters
 # the number of round to do boosting
@@ -79,6 +79,11 @@ class BoostLearner {
    * \param val value of the parameter
    */
   inline void SetParam(const char *name, const char *val) {
+    // in this version, bst: prefix is no longer required
+    if (strncmp(name, "bst:", 4) != 0) {
+      std::string n = "bst:"; n += name;
+      this->SetParam(n.c_str(), val);
+    }
     if (!strcmp(name, "silent")) silent = atoi(val);
     if (!strcmp(name, "prob_buffer_row")) prob_buffer_row = static_cast<float>(atof(val));
     if (!strcmp(name, "eval_metric")) evaluator_.AddEval(val);
@@ -91,7 +96,7 @@ class BoostLearner {
       if (!strcmp(name, "objective")) name_obj_ = val;
       if (!strcmp(name, "booster")) name_gbm_ = val;
       mparam.SetParam(name, val);
     }
     if (gbm_ != NULL) gbm_->SetParam(name, val);
     if (obj_ != NULL) obj_->SetParam(name, val);
     if (gbm_ == NULL || obj_ == NULL) {
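The added block makes SetParam self-aliasing: any name that does not already start with "bst:" is first re-submitted with the prefix attached, so both the old prefixed spelling and the new plain spelling reach the booster parameters. An illustrative Python model of that control flow (not xgboost's API, just the recursion in miniature):

# illustrative model of the C++ SetParam recursion above
def set_param(params, name, val):
    if not name.startswith('bst:'):
        set_param(params, 'bst:' + name, val)  # also store under the old prefixed name
    params[name] = val

params = {}
set_param(params, 'eta', 0.1)
assert params['eta'] == 0.1 and params['bst:eta'] == 0.1  # both spellings resolve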
@@ -13,7 +13,7 @@ dtrain = xgb.DMatrix('agaricus.txt.train')
 dtest = xgb.DMatrix('agaricus.txt.test')
 
 # specify parameters via map, definition are same as c++ version
-param = {'bst:max_depth':2, 'bst:eta':1, 'silent':1, 'objective':'binary:logistic' }
+param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic' }
 
 # specify validations set to watch performance
 evallist = [(dtest,'eval'), (dtrain,'train')]
@@ -75,7 +75,7 @@ print ('start running example to used cutomized objective function')
 # note: for customized objective function, we leave objective as default
 # note: what we are getting is margin value in prediction
 # you must know what you are doing
-param = {'bst:max_depth':2, 'bst:eta':1, 'silent':1 }
+param = {'max_depth':2, 'eta':1, 'silent':1 }
 
 # user define objective function, given prediction, return gradient and second order gradient
 # this is loglikelihood loss
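The context comments describe the customized objective wired into the xgb.train call below: it receives raw margin predictions plus the training DMatrix and returns the first- and second-order gradients of the logistic log-likelihood loss. A sketch of such a logregobj, assuming numpy:

import numpy as np

def logregobj(preds, dtrain):
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))  # preds are margins, so squash with the sigmoid
    grad = preds - labels                 # first-order gradient
    hess = preds * (1.0 - preds)          # second-order gradient (hessian)
    return grad, hess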
@@ -107,7 +107,7 @@ bst = xgb.train(param, dtrain, num_round, evallist, logregobj, evalerror)
 #
 print ('start running example to start from a initial prediction')
 # specify parameters via map, definition are same as c++ version
-param = {'bst:max_depth':2, 'bst:eta':1, 'silent':1, 'objective':'binary:logistic' }
+param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic' }
 # train xgboost for 1 round
 bst = xgb.train( param, dtrain, 1, evallist )
 # Note: we need the margin value instead of transformed prediction in set_base_margin
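As the last context line notes, set_base_margin expects untransformed margin values. A sketch of continuing from the 1-round model, assuming predict accepts an output_margin flag as in the accompanying demos:

# boost from the existing model's margin predictions
ptrain = bst.predict(dtrain, output_margin=True)  # margins, not probabilities
dtrain.set_base_margin(ptrain)
bst2 = xgb.train(param, dtrain, num_round, evallist)  # continues from the margin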