Move skl eval_metric and early_stopping rounds to model params. (#6751)
A new parameter `custom_metric` is added to `train` and `cv` to distinguish its behaviour from the old `feval`, and `feval` is deprecated. The new `custom_metric` receives the transformed prediction when a built-in objective is used. This enables XGBoost to use cost functions from other libraries like scikit-learn directly, without going through the definition of the link function. `eval_metric` and `early_stopping_rounds` in the sklearn interface are moved from `fit` to `__init__` and are now saved as part of the scikit-learn model; the old parameters in the `fit` function are now deprecated. The new `eval_metric` in `__init__` has the same new behaviour as `custom_metric`. Added more detailed documentation for the behaviour of custom objectives and metrics.
This commit is contained in:
@@ -144,7 +144,7 @@ def py_rmsle(dtrain: xgb.DMatrix, dtest: xgb.DMatrix) -> Dict:
|
||||
dtrain=dtrain,
|
||||
num_boost_round=kBoostRound,
|
||||
obj=squared_log,
|
||||
feval=rmsle,
|
||||
custom_metric=rmsle,
|
||||
evals=[(dtrain, 'dtrain'), (dtest, 'dtest')],
|
||||
evals_result=results)
|
||||
|
||||
|
||||
@@ -3,6 +3,9 @@ only applicable after (excluding) XGBoost 1.0.0, as before this version XGBoost
|
||||
returns transformed prediction for multi-class objective function. More
|
||||
details in comments.
|
||||
|
||||
See https://xgboost.readthedocs.io/en/latest/tutorials/custom_metric_obj.html for detailed
|
||||
tutorial and notes.
|
||||
|
||||
'''
|
||||
|
||||
import numpy as np
|
||||
@@ -95,7 +98,12 @@ def predict(booster: xgb.Booster, X):
|
||||
|
||||
def merror(predt: np.ndarray, dtrain: xgb.DMatrix):
|
||||
y = dtrain.get_label()
|
||||
# Like custom objective, the predt is untransformed leaf weight
|
||||
# Like the custom objective, the predt is the untransformed leaf weight when a custom objective
|
||||
# is provided.
|
||||
|
||||
# With the use of `custom_metric` parameter in train function, custom metric receives
|
||||
# raw input only when a custom objective is also being used. Otherwise the custom metric
|
||||
# will receive transformed prediction.
|
||||
assert predt.shape == (kRows, kClasses)
|
||||
out = np.zeros(kRows)
|
||||
for r in range(predt.shape[0]):
|
||||
@@ -134,7 +142,7 @@ def main(args):
|
||||
m,
|
||||
num_boost_round=kRounds,
|
||||
obj=softprob_obj,
|
||||
feval=merror,
|
||||
custom_metric=merror,
|
||||
evals_result=custom_results,
|
||||
evals=[(m, 'train')])
|
||||
|
||||
@@ -143,6 +151,7 @@ def main(args):
|
||||
native_results = {}
|
||||
# Use the same objective function defined in XGBoost.
|
||||
booster_native = xgb.train({'num_class': kClasses,
|
||||
"objective": "multi:softmax",
|
||||
'eval_metric': 'merror'},
|
||||
m,
|
||||
num_boost_round=kRounds,
|
||||
|
||||
Reference in New Issue
Block a user