Cleanup Python GPU tests. (#9934)

* Cleanup Python GPU tests.

- Remove the use of `gpu_hist` and `gpu_id` in cudf/cupy tests.
- Move base margin test into the testing directory.
This commit is contained in:
Jiaming Yuan
2024-01-04 13:15:18 +08:00
committed by GitHub
parent 3c004a4145
commit 9f73127a23
14 changed files with 282 additions and 240 deletions

View File

@@ -6,22 +6,29 @@ from xgboost import testing as tm
# Abort any single test in this module after 10 seconds.
pytestmark = tm.timeout(10)

# Hypothesis search space for gblinear training parameters: every draw is a
# dict of valid `xgb.train` parameters for the linear booster.
parameter_strategy = strategies.fixed_dictionaries(
    {
        "booster": strategies.just("gblinear"),
        "eta": strategies.floats(0.01, 0.25),
        "tolerance": strategies.floats(1e-5, 1e-2),
        "nthread": strategies.integers(1, 4),
        "feature_selector": strategies.sampled_from(
            ["cyclic", "shuffle", "greedy", "thrifty"]
        ),
        "top_k": strategies.integers(1, 10),
    }
)
def train_result(param, dmat, num_rounds):
    """Train a booster and return its recorded evaluation history.

    Parameters
    ----------
    param : dict
        Training parameters forwarded to ``xgb.train``.
    dmat :
        The DMatrix used both for training and as the "train" evaluation set.
    num_rounds : int
        Number of boosting rounds to run.

    Returns
    -------
    dict
        The ``evals_result`` mapping, keyed by evaluation-set name
        (``"train"``) and then by metric name.
    """
    result = {}
    booster = xgb.train(
        param,
        dmat,
        num_rounds,
        [(dmat, "train")],
        verbose_eval=False,
        evals_result=result,
    )
    # Sanity check: training performed exactly the requested number of rounds.
    assert booster.num_boosted_rounds() == num_rounds
    return result
@@ -32,9 +39,11 @@ class TestGPULinear:
@settings(deadline=None, max_examples=20, print_blob=True)
def test_gpu_coordinate(self, param, num_rounds, dataset):
    """Train gblinear with the GPU coordinate updater; the training metric
    must be non-increasing over the boosting rounds."""
    # Hypothesis may generate an empty dataset; skip those draws.
    assume(len(dataset.y) > 0)
    param["updater"] = "gpu_coord_descent"
    param = dataset.set_params(param)
    result = train_result(param, dataset.get_dmat(), num_rounds)["train"][
        dataset.metric
    ]
    # Attach the metric history to the Hypothesis report for failed examples.
    note(result)
    assert tm.non_increasing(result)
@@ -46,16 +55,18 @@ class TestGPULinear:
strategies.integers(10, 50),
tm.make_dataset_strategy(),
strategies.floats(1e-5, 0.8),
strategies.floats(1e-5, 0.8)
strategies.floats(1e-5, 0.8),
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_gpu_coordinate_regularised(self, param, num_rounds, dataset, alpha, lambd):
    """Train gblinear with the GPU coordinate updater under L1/L2
    regularisation; the training metric must improve overall."""
    # Hypothesis may generate an empty dataset; skip those draws.
    assume(len(dataset.y) > 0)
    param["updater"] = "gpu_coord_descent"
    param["alpha"] = alpha
    param["lambda"] = lambd
    param = dataset.set_params(param)
    result = train_result(param, dataset.get_dmat(), num_rounds)["train"][
        dataset.metric
    ]
    # Attach the metric history to the Hypothesis report for failed examples.
    note(result)
    # Regularisation can make intermediate rounds non-monotone, so only the
    # first and last values are required to be non-increasing.
    assert tm.non_increasing([result[0], result[-1]])
@@ -64,8 +75,12 @@ class TestGPULinear:
# Training linear model is quite expensive, so we don't include it in
# test_from_cupy.py
import cupy
params = {'booster': 'gblinear', 'updater': 'gpu_coord_descent',
'n_estimators': 100}
params = {
"booster": "gblinear",
"updater": "gpu_coord_descent",
"n_estimators": 100,
}
X, y = tm.get_california_housing()
cpu_model = xgb.XGBRegressor(**params)
cpu_model.fit(X, y)