diff --git a/demo/binary_classification/mushroom.conf b/demo/binary_classification/mushroom.conf
index d364905f7..d2566f132 100644
--- a/demo/binary_classification/mushroom.conf
+++ b/demo/binary_classification/mushroom.conf
@@ -6,13 +6,13 @@ objective = binary:logistic
 
 # Tree Booster Parameters
 # step size shrinkage
-bst:eta = 1.0
+eta = 1.0
 # minimum loss reduction required to make a further partition
-bst:gamma = 1.0
+gamma = 1.0
 # minimum sum of instance weight(hessian) needed in a child
-bst:min_child_weight = 1
+min_child_weight = 1
 # maximum depth of a tree
-bst:max_depth = 3
+max_depth = 3
 
 # Task Parameters
 # the number of round to do boosting
diff --git a/demo/kaggle-higgs/higgs-numpy.py b/demo/kaggle-higgs/higgs-numpy.py
index bd60f074f..1e7448a4c 100755
--- a/demo/kaggle-higgs/higgs-numpy.py
+++ b/demo/kaggle-higgs/higgs-numpy.py
@@ -42,8 +42,8 @@ param = {}
 param['objective'] = 'binary:logitraw'
 # scale weight of positive examples
 param['scale_pos_weight'] = sum_wneg/sum_wpos
-param['bst:eta'] = 0.1
-param['bst:max_depth'] = 6
+param['eta'] = 0.1
+param['max_depth'] = 6
 param['eval_metric'] = 'auc'
 param['silent'] = 1
 param['nthread'] = 16
diff --git a/demo/multiclass_classification/train.py b/demo/multiclass_classification/train.py
index 702542a4c..f387de7c0 100755
--- a/demo/multiclass_classification/train.py
+++ b/demo/multiclass_classification/train.py
@@ -25,8 +25,8 @@ param = {}
 # use softmax multi-class classification
 param['objective'] = 'multi:softmax'
 # scale weight of positive examples
-param['bst:eta'] = 0.1
-param['bst:max_depth'] = 6
+param['eta'] = 0.1
+param['max_depth'] = 6
 param['silent'] = 1
 param['nthread'] = 4
 param['num_class'] = 6
diff --git a/demo/rank/mq2008.conf b/demo/rank/mq2008.conf
index 90aadec4e..a19758bb7 100644
--- a/demo/rank/mq2008.conf
+++ b/demo/rank/mq2008.conf
@@ -5,13 +5,13 @@ objective="rank:pairwise"
 
 # Tree Booster Parameters
 # step size shrinkage
-bst:eta = 0.1
+eta = 0.1
 # minimum loss reduction required to make a further partition
-bst:gamma = 1.0
+gamma = 1.0
 # minimum sum of instance weight(hessian) needed in a child
-bst:min_child_weight = 0.1
+min_child_weight = 0.1
 # maximum depth of a tree
-bst:max_depth = 6
+max_depth = 6
 
 # Task parameters
 # the number of round to do boosting
diff --git a/demo/regression/machine.conf b/demo/regression/machine.conf
index f5a5163a8..8c677a502 100644
--- a/demo/regression/machine.conf
+++ b/demo/regression/machine.conf
@@ -7,13 +7,13 @@ objective = reg:linear
 
 # Tree Booster Parameters
 # step size shrinkage
-bst:eta = 1.0
+eta = 1.0
 # minimum loss reduction required to make a further partition
-bst:gamma = 1.0
+gamma = 1.0
 # minimum sum of instance weight(hessian) needed in a child
-bst:min_child_weight = 1
+min_child_weight = 1
 # maximum depth of a tree
-bst:max_depth = 3
+max_depth = 3
 
 # Task parameters
 # the number of round to do boosting
diff --git a/src/learner/learner-inl.hpp b/src/learner/learner-inl.hpp
index 7bf8c33ac..40ef274ee 100644
--- a/src/learner/learner-inl.hpp
+++ b/src/learner/learner-inl.hpp
@@ -79,6 +79,11 @@ class BoostLearner {
    * \param val value of the parameter
    */
   inline void SetParam(const char *name, const char *val) {
+    // in this version, bst: prefix is no longer required
+    if (strncmp(name, "bst:", 4) != 0) {
+      std::string n = "bst:"; n += name;
+      this->SetParam(n.c_str(), val);
+    }
     if (!strcmp(name, "silent")) silent = atoi(val);
     if (!strcmp(name, "prob_buffer_row")) prob_buffer_row = static_cast<float>(atof(val));
     if (!strcmp(name, "eval_metric")) evaluator_.AddEval(val);
@@ -91,7 +96,7 @@ class BoostLearner {
       if (!strcmp(name, "objective")) name_obj_ = val;
       if (!strcmp(name, "booster")) name_gbm_ = val;
       mparam.SetParam(name, val);
-    }
+    }
     if (gbm_ != NULL) gbm_->SetParam(name, val);
     if (obj_ != NULL) obj_->SetParam(name, val);
     if (gbm_ == NULL || obj_ == NULL) {
diff --git a/wrapper/python-example/demo.py b/wrapper/python-example/demo.py
index 52d565456..687b491a4 100755
--- a/wrapper/python-example/demo.py
+++ b/wrapper/python-example/demo.py
@@ -13,7 +13,7 @@ dtrain = xgb.DMatrix('agaricus.txt.train')
 dtest = xgb.DMatrix('agaricus.txt.test')
 
 # specify parameters via map, definition are same as c++ version
-param = {'bst:max_depth':2, 'bst:eta':1, 'silent':1, 'objective':'binary:logistic' }
+param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic' }
 
 # specify validations set to watch performance
 evallist = [(dtest,'eval'), (dtrain,'train')]
@@ -75,7 +75,7 @@ print ('start running example to used cutomized objective function')
 # note: for customized objective function, we leave objective as default
 # note: what we are getting is margin value in prediction
 # you must know what you are doing
-param = {'bst:max_depth':2, 'bst:eta':1, 'silent':1 }
+param = {'max_depth':2, 'eta':1, 'silent':1 }
 
 # user define objective function, given prediction, return gradient and second order gradient
 # this is loglikelihood loss
@@ -107,7 +107,7 @@ bst = xgb.train(param, dtrain, num_round, evallist, logregobj, evalerror)
 #
 print ('start running example to start from a initial prediction')
 # specify parameters via map, definition are same as c++ version
-param = {'bst:max_depth':2, 'bst:eta':1, 'silent':1, 'objective':'binary:logistic' }
+param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic' }
 # train xgboost for 1 round
 bst = xgb.train( param, dtrain, 1, evallist )
 # Note: we need the margin value instead of transformed prediction in set_base_margin
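A minimal usage sketch of what the patch means for callers of the Python wrapper (it reuses the agaricus demo data from wrapper/python-example; the param_new/param_old variable names are only illustrative): with the forwarding added to BoostLearner::SetParam, the bare parameter names used by the updated demos and the old bst:-prefixed names should configure the same booster.

import xgboost as xgb

# demo data shipped with the repository (same files demo.py loads)
dtrain = xgb.DMatrix('agaricus.txt.train')
dtest = xgb.DMatrix('agaricus.txt.test')
evallist = [(dtest, 'eval'), (dtrain, 'train')]

# new style: bare names now work because SetParam forwards any name
# without a "bst:" prefix back to itself with the prefix prepended
param_new = {'max_depth': 2, 'eta': 1, 'silent': 1, 'objective': 'binary:logistic'}
# old style: the bst:-prefixed names keep working as before
param_old = {'bst:max_depth': 2, 'bst:eta': 1, 'silent': 1, 'objective': 'binary:logistic'}

for param in (param_new, param_old):
    bst = xgb.train(param, dtrain, 2, evallist)  # both forms are expected to train identically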