[Breaking] Fix custom metric for multi output. (#5954)
* Set output margin to true for the custom metric, so the metric receives the raw (untransformed) prediction. This fix applies only to the R and Python packages.
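In effect, when a custom objective is used together with a custom metric, the metric now sees the raw margin scores (one column per class) rather than transformed probabilities, and must apply any transform itself. A minimal sketch of the new contract, assuming a multi-class booster (`my_metric` is an illustrative helper, not part of this commit):

import numpy as np
import xgboost as xgb

def my_metric(predt: np.ndarray, dtrain: xgb.DMatrix):
    # Under the new behaviour predt is the untransformed margin matrix,
    # shape (n_samples, n_classes), not softmax probabilities.
    pred_class = np.argmax(predt, axis=1)   # apply the transform ourselves
    return 'MyError', float(np.mean(pred_class != dtrain.get_label()))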
@@ -75,7 +75,7 @@ def softprob_obj(predt: np.ndarray, data: xgb.DMatrix):
     return grad, hess


-def predict(booster, X):
+def predict(booster: xgb.Booster, X):
     '''A customized prediction function that converts raw prediction to
     target class.

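For context, the `softprob_obj` named in the hunk header reimplements XGBoost's `multi:softprob` objective in Python. A minimal sketch of the per-row gradient it computes, assuming softmax cross-entropy over raw margins (this helper is illustrative, not part of the diff):

import numpy as np

def softmax_grad_hess(z: np.ndarray, y: int):
    # z: raw margins for one row; y: true class index.
    e = np.exp(z - z.max())    # numerically stable softmax
    p = e / e.sum()
    grad = p.copy()
    grad[y] -= 1.0             # d(-log p_y)/dz_c = p_c - 1(c == y)
    hess = p * (1.0 - p)       # diagonal Hessian approximation
    return grad, hess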
@@ -93,15 +93,34 @@ def predict(booster, X):
     return out


+def merror(predt: np.ndarray, dtrain: xgb.DMatrix):
+    y = dtrain.get_label()
+    # Like custom objective, the predt is untransformed leaf weight
+    assert predt.shape == (kRows, kClasses)
+    out = np.zeros(kRows)
+    for r in range(predt.shape[0]):
+        i = np.argmax(predt[r])
+        out[r] = i
+
+    assert y.shape == out.shape
+
+    errors = np.zeros(kRows)
+    errors[y != out] = 1.0
+    return 'PyMError', np.sum(errors) / kRows
+
+
 def plot_history(custom_results, native_results):
     fig, axs = plt.subplots(2, 1)
     ax0 = axs[0]
     ax1 = axs[1]

+    pymerror = custom_results['train']['PyMError']
+    merror = native_results['train']['merror']
+
     x = np.arange(0, kRounds, 1)
-    ax0.plot(x, custom_results['train']['merror'], label='Custom objective')
+    ax0.plot(x, pymerror, label='Custom objective')
     ax0.legend()
-    ax1.plot(x, native_results['train']['merror'], label='multi:softmax')
+    ax1.plot(x, merror, label='multi:softmax')
     ax1.legend()

     plt.show()
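The loop in `merror` above takes a per-row argmax over the raw margin matrix (`kRows` and `kClasses` are module-level constants of the demo). As an aside, the same metric can be written without the loop (a sketch, not part of the commit):

import numpy as np

def merror_vectorized(predt: np.ndarray, y: np.ndarray):
    out = np.argmax(predt, axis=1)   # predicted class per row
    return 'PyMError', float(np.mean(y != out))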
@@ -110,10 +129,12 @@ def plot_history(custom_results, native_results):
 def main(args):
     custom_results = {}
     # Use our custom objective function
-    booster_custom = xgb.train({'num_class': kClasses},
+    booster_custom = xgb.train({'num_class': kClasses,
+                                'disable_default_eval_metric': True},
                                m,
                                num_boost_round=kRounds,
                                obj=softprob_obj,
+                               feval=merror,
                                evals_result=custom_results,
                                evals=[(m, 'train')])

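Since the objective here is a Python callback, there is no built-in objective from which XGBoost could pick a sensible default metric, so 'disable_default_eval_metric': True suppresses it and feval=merror supplies the Python metric instead. The native run this demo compares against is trained elsewhere in the file, presumably along these lines (a hypothetical sketch; the actual lines are outside this hunk):

# Hypothetical sketch of the native counterpart; m, kClasses and kRounds
# come from the demo's module scope.
native_results = {}
booster_native = xgb.train({'num_class': kClasses,
                            'eval_metric': 'merror'},   # built-in metric
                           m,
                           num_boost_round=kRounds,
                           evals_result=native_results,
                           evals=[(m, 'train')])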
@@ -131,6 +152,8 @@ def main(args):
     # We are reimplementing the loss function in XGBoost, so it should
     # be the same for normal cases.
     assert np.all(predt_custom == predt_native)
+    np.testing.assert_allclose(custom_results['train']['PyMError'],
+                               native_results['train']['merror'])

     if args.plot != 0:
         plot_history(custom_results, native_results)