Monotone constraints for gpu_hist (#2904)
This commit is contained in:
@@ -97,7 +97,7 @@ def train_sparse(param_in, comparison_tree_method):
|
||||
|
||||
# Enumerates all permutations of variable parameters
|
||||
def assert_updater_accuracy(tree_method, comparison_tree_method, variable_param, tolerance):
|
||||
param = {'tree_method': tree_method }
|
||||
param = {'tree_method': tree_method}
|
||||
names = sorted(variable_param)
|
||||
combinations = it.product(*(variable_param[Name] for Name in names))
|
||||
|
||||
@@ -109,10 +109,14 @@ def assert_updater_accuracy(tree_method, comparison_tree_method, variable_param,
|
||||
param_tmp[name] = set[i]
|
||||
|
||||
print(param_tmp, file=sys.stderr)
|
||||
assert_accuracy(train_boston(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)
|
||||
assert_accuracy(train_digits(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)
|
||||
assert_accuracy(train_cancer(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)
|
||||
assert_accuracy(train_sparse(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)
|
||||
assert_accuracy(train_boston(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance,
|
||||
param_tmp)
|
||||
assert_accuracy(train_digits(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance,
|
||||
param_tmp)
|
||||
assert_accuracy(train_cancer(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance,
|
||||
param_tmp)
|
||||
assert_accuracy(train_sparse(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance,
|
||||
param_tmp)
|
||||
|
||||
|
||||
@attr('gpu')
|
||||
@@ -122,5 +126,6 @@ class TestGPU(unittest.TestCase):
|
||||
assert_updater_accuracy('gpu_exact', 'exact', variable_param, 0.02)
|
||||
|
||||
def test_gpu_hist(self):
|
||||
variable_param = {'n_gpus': [1, -1], 'max_depth': [2, 6], 'max_leaves': [255, 4], 'max_bin': [2, 16, 1024]}
|
||||
variable_param = {'n_gpus': [1, -1], 'max_depth': [2, 6], 'max_leaves': [255, 4], 'max_bin': [2, 16, 1024],
|
||||
'grow_policy': ['depthwise', 'lossguide']}
|
||||
assert_updater_accuracy('gpu_hist', 'hist', variable_param, 0.01)
|
||||
|
||||
44
tests/python-gpu/test_monotonic_constraints.py
Normal file
44
tests/python-gpu/test_monotonic_constraints.py
Normal file
@@ -0,0 +1,44 @@
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
import unittest
|
||||
import xgboost as xgb
|
||||
from nose.plugins.attrib import attr
|
||||
from sklearn.datasets import make_regression
|
||||
|
||||
rng = np.random.RandomState(1994)
|
||||
|
||||
|
||||
def non_decreasing(L):
    """Return True if *L* never drops by 0.001 or more between consecutive values.

    A tiny decrease (< 0.001) is tolerated to absorb floating-point noise
    in model predictions.
    """
    for prev, cur in zip(L, L[1:]):
        if prev - cur >= 0.001:
            return False
    return True
|
||||
|
||||
|
||||
def non_increasing(L):
    """Return True if *L* never rises by 0.001 or more between consecutive values.

    A tiny increase (< 0.001) is tolerated to absorb floating-point noise
    in model predictions.
    """
    for prev, cur in zip(L, L[1:]):
        if cur - prev >= 0.001:
            return False
    return True
|
||||
|
||||
|
||||
def assert_constraint(constraint, tree_method):
    """Train on a 1-feature regression problem under a monotone constraint and
    assert that predictions, ordered by the feature value, respect it.

    constraint:  +1 for non-decreasing, -1 for non-increasing.
    tree_method: xgboost tree_method string to exercise (e.g. 'exact', 'gpu_hist').
    """
    n = 1000
    X, y = make_regression(n, random_state=rng, n_features=1, n_informative=1)
    dtrain = xgb.DMatrix(X, y)
    # xgboost expects the constraint vector as a tuple-style string, one entry
    # per feature.
    param = {
        'tree_method': tree_method,
        'monotone_constraints': "(" + str(constraint) + ")",
    }
    bst = xgb.train(param, dtrain)
    # Predict on the training rows sorted by the (single) feature so the
    # prediction sequence should be monotone if the constraint held.
    dpredict = xgb.DMatrix(X[X[:, 0].argsort()])
    pred = bst.predict(dpredict)
    if constraint > 0:
        assert non_decreasing(pred)
    elif constraint < 0:
        assert non_increasing(pred)
|
||||
|
||||
|
||||
@attr('gpu')
class TestMonotonicConstraints(unittest.TestCase):
    """Verify monotone-constraint enforcement for the exact and gpu_hist updaters."""

    def test_exact(self):
        # Exercise both the increasing (+1) and decreasing (-1) directions.
        for direction in (1, -1):
            assert_constraint(direction, 'exact')

    def test_gpu_hist(self):
        # Exercise both the increasing (+1) and decreasing (-1) directions.
        for direction in (1, -1):
            assert_constraint(direction, 'gpu_hist')
|
||||
Reference in New Issue
Block a user