Add base_margin for evaluation dataset. (#6591)
* Add base margin to evaluation datasets. * Unify the code base for evaluation matrices.
This commit is contained in:
@@ -717,13 +717,13 @@ def test_validation_weights_xgbmodel():
|
||||
assert all((logloss_with_weights[i] != logloss_without_weights[i]
|
||||
for i in [0, 1]))
|
||||
|
||||
with pytest.raises(AssertionError):
|
||||
with pytest.raises(ValueError):
|
||||
# length of eval set and sample weight doesn't match.
|
||||
clf.fit(X_train, y_train, sample_weight=weights_train,
|
||||
eval_set=[(X_train, y_train), (X_test, y_test)],
|
||||
sample_weight_eval_set=[weights_train])
|
||||
|
||||
with pytest.raises(AssertionError):
|
||||
with pytest.raises(ValueError):
|
||||
cls = xgb.XGBClassifier()
|
||||
cls.fit(X_train, y_train, sample_weight=weights_train,
|
||||
eval_set=[(X_train, y_train), (X_test, y_test)],
|
||||
@@ -1118,19 +1118,9 @@ def run_boost_from_prediction(tree_method):
|
||||
assert np.all(predictions_1 == predictions_2)
|
||||
|
||||
|
||||
@pytest.mark.skipif(**tm.no_sklearn())
def test_boost_from_prediction_hist():
    """Boosting from an initial prediction with the ``hist`` tree method."""
    run_boost_from_prediction('hist')
|
||||
|
||||
|
||||
@pytest.mark.skipif(**tm.no_sklearn())
def test_boost_from_prediction_approx():
    """Boosting from an initial prediction with the ``approx`` tree method."""
    run_boost_from_prediction('approx')
|
||||
|
||||
|
||||
@pytest.mark.skipif(**tm.no_sklearn())
def test_boost_from_prediction_exact():
    """Boosting from an initial prediction with the ``exact`` tree method."""
    run_boost_from_prediction('exact')
|
||||
@pytest.mark.parametrize("tree_method", ["hist", "approx", "exact"])
def test_boost_from_prediction(tree_method):
    """Boosting from an initial prediction, once per supported tree method."""
    run_boost_from_prediction(tree_method)
|
||||
|
||||
|
||||
def test_estimator_type():
|
||||
@@ -1154,3 +1144,32 @@ def test_estimator_type():
|
||||
|
||||
cls = xgb.XGBClassifier()
|
||||
cls.load_model(path) # no error
|
||||
|
||||
|
||||
def run_data_initialization(DMatrix, model, X, y):
    """Assert that we don't create duplicated DMatrix.

    Patches ``DMatrix.__init__`` to count constructions, fits ``model`` with
    an ``eval_set``, and checks that identical train/eval data objects are
    cached as a single DMatrix while distinct objects produce separate ones.

    Parameters
    ----------
    DMatrix : the DMatrix class to instrument (e.g. ``xgb.DMatrix``).
    model : estimator class accepting ``n_estimators`` (e.g. ``XGBClassifier``).
    X, y : training data; ``y`` must support ``.copy()``.
    """
    old_init = DMatrix.__init__
    # Use a one-element list so the closure can mutate the counter.
    count = [0]

    def new_init(self, **kwargs):
        count[0] += 1
        return old_init(self, **kwargs)

    DMatrix.__init__ = new_init
    try:
        model(n_estimators=1).fit(X, y, eval_set=[(X, y)])
        # Train and eval share the exact same (X, y) objects, so only one
        # DMatrix should have been created.
        assert count[0] == 1

        count[0] = 0
        y_copy = y.copy()
        model(n_estimators=1).fit(X, y, eval_set=[(X, y_copy)])
        # A different Python object is considered different data, so two
        # DMatrix objects are created.
        assert count[0] == 2
    finally:
        # Always restore the original constructor, even when fit() raises or
        # an assertion fails, so the patch can't leak into other tests.
        DMatrix.__init__ = old_init
|
||||
|
||||
|
||||
def test_data_initialization():
    """DMatrix construction on digits data should happen exactly once."""
    from sklearn.datasets import load_digits

    data, target = load_digits(return_X_y=True)
    run_data_initialization(xgb.DMatrix, xgb.XGBClassifier, data, target)
|
||||
|
||||
Reference in New Issue
Block a user