From 9a48a40cf1e62f55d99f8ce3118b4de80fc7d942 Mon Sep 17 00:00:00 2001
From: Vadim Khotilovich
Date: Sun, 5 Jun 2016 00:17:35 -0500
Subject: [PATCH] Fixes for multiple and default metric (#1239)

* fix multiple evaluation metrics

* create DefaultEvalMetric only when really necessary

* py test for #1239

* make travis happy
---
 src/c_api/c_api.cc                |  5 ++++-
 src/learner.cc                    |  9 +++------
 tests/python/test_basic_models.py | 10 ++++++++++
 3 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/src/c_api/c_api.cc b/src/c_api/c_api.cc
index 37fb92c24..d659b85ce 100644
--- a/src/c_api/c_api.cc
+++ b/src/c_api/c_api.cc
@@ -33,7 +33,10 @@ class Booster {
 
   inline void SetParam(const std::string& name, const std::string& val) {
     auto it = std::find_if(cfg_.begin(), cfg_.end(),
-                           [&name](decltype(*cfg_.begin()) &x) {
+                           [&name, &val](decltype(*cfg_.begin()) &x) {
+                             if (name == "eval_metric") {
+                               return x.first == name && x.second == val;
+                             }
                              return x.first == name;
                            });
     if (it == cfg_.end()) {
diff --git a/src/learner.cc b/src/learner.cc
index 44e56c732..a7391b018 100644
--- a/src/learner.cc
+++ b/src/learner.cc
@@ -256,9 +256,6 @@ class LearnerImpl : public Learner {
       attributes_ = std::map<std::string, std::string>(
           attr.begin(), attr.end());
     }
-    if (metrics_.size() == 0) {
-      metrics_.emplace_back(Metric::Create(obj_->DefaultEvalMetric()));
-    }
     this->base_score_ = mparam.base_score;
     gbm_->ResetPredBuffer(pred_buffer_size_);
     cfg_["num_class"] = common::ToString(mparam.num_class);
@@ -307,6 +304,9 @@ class LearnerImpl : public Learner {
                           const std::vector<std::string>& data_names) override {
     std::ostringstream os;
     os << '[' << iter << ']' << std::setiosflags(std::ios::fixed);
+    if (metrics_.size() == 0) {
+      metrics_.emplace_back(Metric::Create(obj_->DefaultEvalMetric()));
+    }
     for (size_t i = 0; i < data_sets.size(); ++i) {
       this->PredictRaw(data_sets[i], &preds_);
       obj_->EvalTransform(&preds_);
@@ -445,9 +445,6 @@ class LearnerImpl : public Learner {
 
     // reset the base score
     mparam.base_score = obj_->ProbToMargin(mparam.base_score);
-    if (metrics_.size() == 0) {
-      metrics_.emplace_back(Metric::Create(obj_->DefaultEvalMetric()));
-    }
     this->base_score_ = mparam.base_score;
     gbm_->ResetPredBuffer(pred_buffer_size_);
 
diff --git a/tests/python/test_basic_models.py b/tests/python/test_basic_models.py
index c81935e9d..9e9c08423 100644
--- a/tests/python/test_basic_models.py
+++ b/tests/python/test_basic_models.py
@@ -105,6 +105,16 @@ class TestModels(unittest.TestCase):
                    if int(preds2[i] > 0.5) != labels[i]) / float(len(preds2))
         assert err == err2
 
+    def test_multi_eval_metric(self):
+        watchlist = [(dtest, 'eval'), (dtrain, 'train')]
+        param = {'max_depth': 2, 'eta': 0.2, 'silent': 1, 'objective': 'binary:logistic'}
+        param['eval_metric'] = ["auc", "logloss", 'error']
+        evals_result = {}
+        bst = xgb.train(param, dtrain, 4, watchlist, evals_result=evals_result)
+        assert isinstance(bst, xgb.core.Booster)
+        assert len(evals_result['eval']) == 3
+        assert set(evals_result['eval'].keys()) == {'auc', 'error', 'logloss'}
+
     def test_fpreproc(self):
         param = {'max_depth': 2, 'eta': 1, 'silent': 1, 'objective': 'binary:logistic'}
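
Note: before this patch, repeated "eval_metric" settings overwrote one another in the Booster's cached configuration (only the last metric survived), and a default metric was appended at model load/init time even when explicit metrics were configured. With the patch, each distinct eval_metric value is kept, and the default metric is created lazily in EvalOneIter only if no metric was set. The new test_multi_eval_metric test relies on the module-level dtrain/dtest DMatrix fixtures already defined in tests/python/test_basic_models.py; a self-contained sketch of the behaviour the fix enables is below (the synthetic data, seed, and variable names are illustrative assumptions, not part of the patch):

# Self-contained sketch (not part of the patch): the synthetic data and all
# variable names here are illustrative assumptions.
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(1)
X_train, X_test = rng.rand(100, 5), rng.rand(50, 5)
y_train = (X_train[:, 0] > 0.5).astype(int)
y_test = (X_test[:, 0] > 0.5).astype(int)

dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)

param = {'max_depth': 2, 'eta': 0.2, 'silent': 1, 'objective': 'binary:logistic'}
# With this fix, every metric in the list is evaluated and reported,
# rather than only one of them.
param['eval_metric'] = ['auc', 'logloss', 'error']

evals_result = {}
bst = xgb.train(param, dtrain, num_boost_round=4,
                evals=[(dtest, 'eval'), (dtrain, 'train')],
                evals_result=evals_result)

# Each configured metric appears once per watchlist entry.
print(sorted(evals_result['eval'].keys()))  # ['auc', 'error', 'logloss']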