De-duplicate GPU parameters. (#4454)

* Only define `gpu_id` and `n_gpus` in `LearnerTrainParam`.
* Pass `LearnerTrainParam` through XGBoost via a factory method.
* Disable all GPU usage when no GPU-related parameters are specified (fixes XGBoost choosing the GPU too aggressively); see the sketch after this list.
* Test `LearnerTrainParam` IO.
* Fix GPU pickling.
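A minimal sketch of the intended behaviour (not code from this commit; the data and parameter values are illustrative): GPU execution is opt-in, so leaving out `gpu_id`/`n_gpus` keeps training on the CPU, and the GPU is used only when they are requested explicitly.

```python
import numpy as np
import xgboost as xgb

# Illustrative toy data, not taken from the commit's tests.
X = np.random.rand(100, 10)
y = np.random.randint(0, 2, size=100)
dtrain = xgb.DMatrix(X, label=y)

# No GPU-related parameters: everything stays on the CPU.
cpu_param = {"tree_method": "hist", "objective": "binary:logistic"}
bst_cpu = xgb.train(cpu_param, dtrain, num_boost_round=10)

# GPU execution only happens when gpu_id/n_gpus are given explicitly.
gpu_param = {
    "tree_method": "gpu_hist",
    "gpu_id": 0,
    "n_gpus": 1,
    "objective": "binary:logistic",
}
bst_gpu = xgb.train(gpu_param, dtrain, num_boost_round=10)
```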
Jiaming Yuan
2019-05-29 11:55:57 +08:00
committed by GitHub
parent a3fedbeaa8
commit c589eff941
69 changed files with 927 additions and 562 deletions


@@ -29,6 +29,7 @@ class TestGPUPredict(unittest.TestCase):
             "objective": "binary:logistic",
             "predictor": "gpu_predictor",
             'eval_metric': 'auc',
+            'verbosity': '3'
         }
         bst = xgb.train(param, dtrain, iterations, evals=watchlist,
                         evals_result=res)
@@ -42,12 +43,13 @@ class TestGPUPredict(unittest.TestCase):
         cpu_pred_train = bst_cpu.predict(dtrain, output_margin=True)
         cpu_pred_test = bst_cpu.predict(dtest, output_margin=True)
         cpu_pred_val = bst_cpu.predict(dval, output_margin=True)
         np.testing.assert_allclose(cpu_pred_train, gpu_pred_train,
-                                   rtol=1e-5)
+                                   rtol=1e-3)
         np.testing.assert_allclose(cpu_pred_val, gpu_pred_val,
-                                   rtol=1e-5)
+                                   rtol=1e-3)
         np.testing.assert_allclose(cpu_pred_test, gpu_pred_test,
-                                   rtol=1e-5)
+                                   rtol=1e-3)
+
     def non_decreasing(self, L):
         return all((x - y) < 0.001 for x, y in zip(L, L[1:]))
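Related to the "Fix GPU pickling" item above, a hedged sketch of the kind of round trip that change concerns: pickle a booster trained with `gpu_hist` and check that the restored copy predicts identically. Data and parameter values are illustrative, not taken from this commit's test suite.

```python
import pickle

import numpy as np
import xgboost as xgb

# Illustrative toy data.
X = np.random.rand(200, 5)
y = np.random.randint(0, 2, size=200)
dtrain = xgb.DMatrix(X, label=y)

# Train on the GPU, then round-trip the booster through pickle.
param = {"tree_method": "gpu_hist", "gpu_id": 0, "objective": "binary:logistic"}
bst = xgb.train(param, dtrain, num_boost_round=10)
restored = pickle.loads(pickle.dumps(bst))

# The unpickled booster should give the same predictions as the original.
np.testing.assert_allclose(bst.predict(dtrain), restored.predict(dtrain), rtol=1e-6)
```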