xgboost/tests/python/test_tree_regularization.py

import numpy as np
import xgboost as xgb

from numpy.testing import assert_approx_equal

train_data = xgb.DMatrix(np.array([[1]]), label=np.array([1]))
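
# With one training row, the leaf statistics in the comments below are
# analytic: sum_grad and sum_hess are the squared-error gradient and
# hessian of that single example.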


class TestTreeRegularization:
    def test_alpha(self):
        params = {
            'tree_method': 'exact', 'verbosity': 0,
            'objective': 'reg:squarederror',
            'eta': 1,
            'lambda': 0,
            'alpha': 0.1
        }

        model = xgb.train(params, train_data, 1)
        preds = model.predict(train_data)

        # Default prediction (with no trees) is 0.5
        # sum_grad = (0.5 - 1.0)
        # sum_hess = 1.0
        # 0.9 = 0.5 - (sum_grad - alpha * sgn(sum_grad)) / sum_hess
        assert_approx_equal(preds[0], 0.9)

    def test_lambda(self):
        params = {
            'tree_method': 'exact', 'verbosity': 0,
            'objective': 'reg:squarederror',
            'eta': 1,
            'lambda': 1,
            'alpha': 0
        }

        model = xgb.train(params, train_data, 1)
        preds = model.predict(train_data)

        # Default prediction (with no trees) is 0.5
        # sum_grad = (0.5 - 1.0)
        # sum_hess = 1.0
        # 0.75 = 0.5 - sum_grad / (sum_hess + lambda)
        assert_approx_equal(preds[0], 0.75)

    def test_alpha_and_lambda(self):
        params = {
            'tree_method': 'exact', 'verbosity': 1,
            'objective': 'reg:squarederror',
            'eta': 1,
            'lambda': 1,
            'alpha': 0.1
        }

        model = xgb.train(params, train_data, 1)
        preds = model.predict(train_data)

        # Default prediction (with no trees) is 0.5
        # sum_grad = (0.5 - 1.0)
        # sum_hess = 1.0
        # 0.7 = 0.5 - (sum_grad - alpha * sgn(sum_grad)) / (sum_hess + lambda)
        assert_approx_equal(preds[0], 0.7)
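

# A minimal sketch (not part of the original tests) of the closed-form leaf
# weight that the expected values above come from. For squared error with
# base_score = 0.5 and eta = 1, a single-leaf tree gets weight
#     w = -soft_threshold(sum_grad, alpha) / (sum_hess + lambda)
# where soft_threshold(g, a) = sign(g) * max(|g| - a, 0). The helper name
# below is illustrative, not an xgboost API.
def _expected_prediction(alpha, lambda_, base_score=0.5, label=1.0):
    sum_grad = base_score - label  # squared-error gradient of the one row
    sum_hess = 1.0                 # squared-error hessian
    thresholded = np.sign(sum_grad) * max(abs(sum_grad) - alpha, 0.0)
    return base_score - thresholded / (sum_hess + lambda_)


# Reproduces the three assertions above:
#   _expected_prediction(alpha=0.1, lambda_=0)  -> 0.9
#   _expected_prediction(alpha=0.0, lambda_=1)  -> 0.75
#   _expected_prediction(alpha=0.1, lambda_=1)  -> 0.7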