Fixes for multiple and default metric (#1239)
* fix multiple evaluation metrics
* create DefaultEvalMetric only when really necessary
* py test for #1239
* make travis happy
commit 9a48a40cf1 (parent 9ef86072f4)
@@ -33,7 +33,10 @@ class Booster {
   inline void SetParam(const std::string& name, const std::string& val) {
     auto it = std::find_if(cfg_.begin(), cfg_.end(),
-                           [&name](decltype(*cfg_.begin()) &x) {
+                           [&name, &val](decltype(*cfg_.begin()) &x) {
+                             if (name == "eval_metric") {
+                               return x.first == name && x.second == val;
+                             }
                              return x.first == name;
                            });
     if (it == cfg_.end()) {
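The lambda above carries the whole fix: cfg_ normally de-duplicates parameters by name, so setting eval_metric twice used to overwrite the first value; eval_metric entries are now matched on both name and value, letting several metrics accumulate. A minimal Python sketch of that matching rule (the set_param helper is hypothetical, not part of the XGBoost API):

    # Hypothetical helper mirroring the new Booster::SetParam rule:
    # 'eval_metric' entries match on (name, value); every other
    # parameter matches on name alone and is overwritten in place.
    def set_param(cfg, name, val):
        for i, (k, v) in enumerate(cfg):
            if k == name and (name != "eval_metric" or v == val):
                cfg[i] = (name, val)
                return
        cfg.append((name, val))

    cfg = []
    set_param(cfg, "eval_metric", "auc")
    set_param(cfg, "eval_metric", "logloss")  # new value: kept as a second entry
    set_param(cfg, "eta", "0.3")
    set_param(cfg, "eta", "0.1")              # same name: overwrites the old eta
    assert cfg == [("eval_metric", "auc"), ("eval_metric", "logloss"), ("eta", "0.1")]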
@@ -256,9 +256,6 @@ class LearnerImpl : public Learner {
       attributes_ = std::map<std::string, std::string>(
           attr.begin(), attr.end());
     }
-    if (metrics_.size() == 0) {
-      metrics_.emplace_back(Metric::Create(obj_->DefaultEvalMetric()));
-    }
     this->base_score_ = mparam.base_score;
     gbm_->ResetPredBuffer(pred_buffer_size_);
     cfg_["num_class"] = common::ToString(mparam.num_class);
@@ -307,6 +304,9 @@ class LearnerImpl : public Learner {
     std::ostringstream os;
     os << '[' << iter << ']'
        << std::setiosflags(std::ios::fixed);
+    if (metrics_.size() == 0) {
+      metrics_.emplace_back(Metric::Create(obj_->DefaultEvalMetric()));
+    }
     for (size_t i = 0; i < data_sets.size(); ++i) {
       this->PredictRaw(data_sets[i], &preds_);
       obj_->EvalTransform(&preds_);
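This hunk is the counterpart to the removal at -256 above: instead of instantiating the default metric eagerly whenever a model is loaded, EvalOneIter now creates it lazily, and only if no eval_metric has been configured by evaluation time. From Python the observable behavior should be unchanged; a sketch with synthetic data (variable names illustrative, 'error' assumed as the default metric for binary:logistic in this version):

    import numpy as np
    import xgboost as xgb

    X = np.random.rand(100, 5)
    y = np.random.randint(2, size=100)
    dtrain = xgb.DMatrix(X, label=y)

    res = {}
    xgb.train({'objective': 'binary:logistic', 'silent': 1},
              dtrain, 2, [(dtrain, 'train')], evals_result=res)
    # no eval_metric was set, so only the objective's default shows up
    assert set(res['train'].keys()) == {'error'}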
@@ -445,9 +445,6 @@ class LearnerImpl : public Learner {
 
     // reset the base score
     mparam.base_score = obj_->ProbToMargin(mparam.base_score);
-    if (metrics_.size() == 0) {
-      metrics_.emplace_back(Metric::Create(obj_->DefaultEvalMetric()));
-    }
 
     this->base_score_ = mparam.base_score;
     gbm_->ResetPredBuffer(pred_buffer_size_);
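The same eager creation is dropped here from the second initialization path, so after this commit the default metric appears to be created in a single place, inside EvalOneIter, matching the commit's "only when really necessary" goal.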
@@ -105,6 +105,16 @@ class TestModels(unittest.TestCase):
                    if int(preds2[i] > 0.5) != labels[i]) / float(len(preds2))
         assert err == err2
 
+    def test_multi_eval_metric(self):
+        watchlist = [(dtest, 'eval'), (dtrain, 'train')]
+        param = {'max_depth': 2, 'eta': 0.2, 'silent': 1, 'objective': 'binary:logistic'}
+        param['eval_metric'] = ["auc", "logloss", 'error']
+        evals_result = {}
+        bst = xgb.train(param, dtrain, 4, watchlist, evals_result=evals_result)
+        assert isinstance(bst, xgb.core.Booster)
+        assert len(evals_result['eval']) == 3
+        assert set(evals_result['eval'].keys()) == {'auc', 'error', 'logloss'}
+
     def test_fpreproc(self):
         param = {'max_depth': 2, 'eta': 1, 'silent': 1,
                  'objective': 'binary:logistic'}
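The new test pins down the user-visible contract: a list passed as eval_metric yields one result series per metric under each watchlist name. A short consumption sketch (assuming the test's evals_result variable is in scope):

    # evals_result maps data name -> metric name -> per-round history
    for data_name, metric_dict in evals_result.items():   # 'eval', 'train'
        for metric_name, history in metric_dict.items():  # 'auc', 'logloss', 'error'
            print(data_name, metric_name, history[-1])    # value from the last round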