[Breaking] Change default evaluation metric for classification to logloss / mlogloss (#6183)

* Change DefaultEvalMetric of classification from error to logloss

* Change default binary metric in plugin/example/custom_obj.cc

* Set old error metric in python tests

* Set old error metric in R tests

* Fix missed eval metrics and typos in R tests

* Fix setting eval_metric twice in R tests

* Add warning for empty eval_metric for classification

* Fix Dask tests

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
This commit is contained in:
Christian Lorentzen
2020-10-02 21:06:47 +02:00
committed by GitHub
parent e0e4f15d0e
commit cf4f019ed6
18 changed files with 56 additions and 32 deletions

View File

@@ -115,7 +115,9 @@ class TestDMatrix(unittest.TestCase):
eval_res_0 = {}
booster = xgb.train(
-            {'num_class': 3, 'objective': 'multi:softprob'}, d,
+            {'num_class': 3, 'objective': 'multi:softprob',
+             'eval_metric': 'merror'},
+            d,
             num_boost_round=2, evals=[(d, 'd')], evals_result=eval_res_0)
predt = booster.predict(d)
@@ -130,9 +132,11 @@ class TestDMatrix(unittest.TestCase):
assert sliced_margin.shape[0] == len(ridxs) * 3
eval_res_1 = {}
-        xgb.train({'num_class': 3, 'objective': 'multi:softprob'}, sliced,
-                  num_boost_round=2, evals=[(sliced, 'd')],
-                  evals_result=eval_res_1)
+        xgb.train(
+            {'num_class': 3, 'objective': 'multi:softprob',
+             'eval_metric': 'merror'},
+            sliced,
+            num_boost_round=2, evals=[(sliced, 'd')], evals_result=eval_res_1)
eval_res_0 = eval_res_0['d']['merror']
eval_res_1 = eval_res_1['d']['merror']