De-duplicate GPU parameters. (#4454)
* Only define `gpu_id` and `n_gpus` in `LearnerTrainParam` * Pass LearnerTrainParam through XGBoost via factory method. * Disable all GPU usage when GPU related parameters are not specified (fixes XGBoost choosing GPU too aggressively). * Test learner train param io. * Fix gpu pickling.
This commit is contained in:
20
tests/python-gpu/load_pickle.py
Normal file
20
tests/python-gpu/load_pickle.py
Normal file
@@ -0,0 +1,20 @@
|
||||
'''Loading a pickled model generated by test_pickling.py'''
|
||||
import pickle
|
||||
import unittest
|
||||
import os
|
||||
import xgboost as xgb
|
||||
import sys
|
||||
|
||||
sys.path.append("tests/python")
|
||||
from test_pickling import build_dataset, model_path
|
||||
|
||||
|
||||
class TestLoadPickle(unittest.TestCase):
    '''Load a booster pickled by test_pickling.py inside a CPU-only process.'''

    def test_load_pkl(self):
        # The launching test must have hidden every GPU from this process.
        assert os.environ['CUDA_VISIBLE_DEVICES'] == ''
        with open(model_path, 'rb') as fd:
            booster = pickle.load(fd)
        features, _ = build_dataset()
        predictions = booster.predict(xgb.DMatrix(features))
        # build_dataset() produces a 10-row matrix, so we expect 10 scores.
        assert len(predictions) == 10
|
||||
@@ -29,6 +29,7 @@ class TestGPUPredict(unittest.TestCase):
|
||||
"objective": "binary:logistic",
|
||||
"predictor": "gpu_predictor",
|
||||
'eval_metric': 'auc',
|
||||
'verbosity': '3'
|
||||
}
|
||||
bst = xgb.train(param, dtrain, iterations, evals=watchlist,
|
||||
evals_result=res)
|
||||
@@ -42,12 +43,13 @@ class TestGPUPredict(unittest.TestCase):
|
||||
cpu_pred_train = bst_cpu.predict(dtrain, output_margin=True)
|
||||
cpu_pred_test = bst_cpu.predict(dtest, output_margin=True)
|
||||
cpu_pred_val = bst_cpu.predict(dval, output_margin=True)
|
||||
|
||||
np.testing.assert_allclose(cpu_pred_train, gpu_pred_train,
|
||||
rtol=1e-5)
|
||||
rtol=1e-3)
|
||||
np.testing.assert_allclose(cpu_pred_val, gpu_pred_val,
|
||||
rtol=1e-5)
|
||||
rtol=1e-3)
|
||||
np.testing.assert_allclose(cpu_pred_test, gpu_pred_test,
|
||||
rtol=1e-5)
|
||||
rtol=1e-3)
|
||||
|
||||
def non_decreasing(self, L):
    '''Return True when L never drops by 0.001 or more between neighbours.'''
    for prev, nxt in zip(L, L[1:]):
        if not (prev - nxt < 0.001):
            return False
    return True
|
||||
|
||||
53
tests/python-gpu/test_pickling.py
Normal file
53
tests/python-gpu/test_pickling.py
Normal file
@@ -0,0 +1,53 @@
|
||||
'''Test model IO with pickle.'''
|
||||
import pickle
|
||||
import unittest
|
||||
import numpy as np
|
||||
import subprocess
|
||||
import os
|
||||
import xgboost as xgb
|
||||
|
||||
# Where test_pickling writes the pickled booster; load_pickle.py reads it back.
model_path = './model.pkl'
|
||||
|
||||
|
||||
def build_dataset():
    '''Build a tiny deterministic regression dataset.

    Returns a (10, 10) feature matrix of evenly spaced values in [0, 100]
    and a length-10 label vector of evenly spaced values in [0, 10].
    '''
    size = 10
    features = np.linspace(0, size * size, size * size).reshape((size, size))
    labels = np.linspace(0, size, size)
    return features, labels
|
||||
|
||||
|
||||
class TestPickling(unittest.TestCase):
    '''Train a GPU model, pickle it, then reload it in a CPU-only subprocess.'''

    def test_pickling(self):
        x, y = build_dataset()
        train_x = xgb.DMatrix(x, label=y)
        param = {'tree_method': 'gpu_hist',
                 'gpu_id': 0,
                 'n_gpus': -1,
                 'verbosity': 1}
        bst = xgb.train(param, train_x)

        with open(model_path, 'wb') as fd:
            pickle.dump(bst, fd)
        args = ["pytest",
                "--verbose",
                "-s",
                "--fulltrace",
                "./tests/python-gpu/load_pickle.py"]
        command = ' '.join(args)

        # Hide all GPUs from the child so unpickling exercises the CPU path.
        cuda_environment = {'CUDA_VISIBLE_DEVICES': ''}
        # Copy the environment instead of mutating os.environ in place:
        # updating os.environ directly would leak CUDA_VISIBLE_DEVICES=''
        # into this process and disable the GPU for every later test in
        # the session.
        # Passing cuda_environment alone as the `env' argument results
        # in failure on Windows:
        # Fatal Python error: _Py_HashRandomization_Init: failed to
        # get random numbers to initialize Python
        env = os.environ.copy()
        env.update(cuda_environment)

        # Load model in a CPU only environment.
        status = subprocess.call(command, env=env, shell=True)
        assert status == 0
        os.remove(model_path)
||||
Reference in New Issue
Block a user