Avoid calling CUDA code on CPU for linear model. (#7154)

This commit is contained in:
Jiaming Yuan
2021-09-01 10:45:31 +08:00
committed by GitHub
parent ba69244a94
commit 3a4f51f39f
4 changed files with 43 additions and 19 deletions

View File

@@ -19,8 +19,15 @@ class TestLoadPickle:
assert os.environ['CUDA_VISIBLE_DEVICES'] == '-1'
bst = load_pickle(model_path)
x, y = build_dataset()
test_x = xgb.DMatrix(x)
res = bst.predict(test_x)
if isinstance(bst, xgb.Booster):
test_x = xgb.DMatrix(x)
res = bst.predict(test_x)
else:
res = bst.predict(x)
assert len(res) == 10
bst.set_params(n_jobs=1) # triggers a re-configuration
res = bst.predict(x)
assert len(res) == 10
def test_predictor_type_is_auto(self):

View File

@@ -41,13 +41,7 @@ class TestPickling:
"-s",
"--fulltrace"]
def test_pickling(self):
x, y = build_dataset()
train_x = xgb.DMatrix(x, label=y)
param = {'tree_method': 'gpu_hist',
'verbosity': 1}
bst = xgb.train(param, train_x)
def run_pickling(self, bst) -> None:
save_pickle(bst, model_path)
args = [
"pytest", "--verbose", "-s", "--fulltrace",
@@ -71,6 +65,25 @@ class TestPickling:
assert status == 0
os.remove(model_path)
@pytest.mark.skipif(**tm.no_sklearn())
def test_pickling(self):
    """Round-trip pickle both the native Booster and the sklearn wrapper.

    Covers two GPU training configurations: `gpu_hist` tree models and
    `gblinear` with the GPU coordinate-descent updater. For each config the
    model is trained via ``xgb.train`` and via ``XGBRegressor.fit`` and then
    handed to ``run_pickling`` for the save/reload check.
    """
    features, labels = build_dataset()
    dtrain = xgb.DMatrix(features, label=labels)
    # One entry per training configuration; keys/values must match what the
    # reloaded model is expected to carry through pickling.
    configs = [
        {'tree_method': 'gpu_hist', "gpu_id": 0},
        {"booster": "gblinear", "updater": "gpu_coord_descent", "gpu_id": 0},
    ]
    for config in configs:
        # Native Booster path.
        self.run_pickling(xgb.train(config, dtrain))
        # sklearn-wrapper path.
        self.run_pickling(xgb.XGBRegressor(**config).fit(features, labels))
@pytest.mark.mgpu
def test_wrap_gpu_id(self):
X, y = build_dataset()