Revert ntree limit fix (#6616)
The old (pre-fix) best_ntree_limit ignored the num_class parameter, which is incorrect. Previously we worked around it in the C++ layer to avoid possible breaking changes in other language bindings, but the Python interpretation stayed incorrect. The PR fixed that in Python to take num_class into account, but didn't remove the old workaround, so the tree calculation in the predictor is incorrect; see PredictBatch in CPUPredictor.
This commit is contained in:
@@ -347,7 +347,7 @@ class TestModels:
|
||||
X, y = load_iris(return_X_y=True)
|
||||
cls = xgb.XGBClassifier(n_estimators=2)
|
||||
cls.fit(X, y, early_stopping_rounds=1, eval_set=[(X, y)])
|
||||
assert cls.get_booster().best_ntree_limit == 2 * cls.n_classes_
|
||||
assert cls.get_booster().best_ntree_limit == 2
|
||||
assert cls.best_ntree_limit == cls.get_booster().best_ntree_limit
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
@@ -356,7 +356,7 @@ class TestModels:
|
||||
|
||||
cls = xgb.XGBClassifier(n_estimators=2)
|
||||
cls.load_model(path)
|
||||
assert cls.get_booster().best_ntree_limit == 2 * cls.n_classes_
|
||||
assert cls.get_booster().best_ntree_limit == 2
|
||||
assert cls.best_ntree_limit == cls.get_booster().best_ntree_limit
|
||||
|
||||
@pytest.mark.skipif(**tm.no_sklearn())
|
||||
|
||||
Reference in New Issue
Block a user