xgboost/tests/python-gpu/test_gpu_updaters.py

import numpy as np
import sys
import unittest
import pytest

sys.path.append("tests/python")
from regression_test_utilities import run_suite, parameter_combinations, \
    assert_results_non_increasing
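

# A GPU run and the matching CPU ('hist') run should converge to roughly the
# same final evaluation metric; the tolerances are loose because small
# numerical differences between the two implementations are expected.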
def assert_gpu_results(cpu_results, gpu_results):
    for cpu_res, gpu_res in zip(cpu_results, gpu_results):
        # Check final eval result roughly equivalent
        assert np.allclose(cpu_res["eval"][-1],
                           gpu_res["eval"][-1], 1e-2, 1e-2)


datasets = ["Boston", "Cancer", "Digits", "Sparse regression",
            "Sparse regression with weights", "Small weights regression"]
class TestGPU(unittest.TestCase):
    def test_gpu_hist(self):
        test_param = parameter_combinations({'n_gpus': [1],
                                             'max_depth': [2, 8],
                                             'max_leaves': [255, 4],
                                             'max_bin': [2, 256],
                                             'grow_policy': ['lossguide']})
        test_param.append({'single_precision_histogram': True})
        test_param.append({'min_child_weight': 0,
                           'lambda': 0})
        for param in test_param:
            param['tree_method'] = 'gpu_hist'
            gpu_results = run_suite(param, select_datasets=datasets)
            assert_results_non_increasing(gpu_results, 1e-2)
            param['tree_method'] = 'hist'
            cpu_results = run_suite(param, select_datasets=datasets)
            assert_gpu_results(cpu_results, gpu_results)
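
    # Multi-GPU machines only (the 'mgpu' pytest marker): training should
    # still work when a non-default device is selected via gpu_id.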
    @pytest.mark.mgpu
    def test_specified_gpu_id_gpu_update(self):
        variable_param = {'n_gpus': [1],
                          'gpu_id': [1],
                          'max_depth': [8],
                          'max_leaves': [255, 4],
                          'max_bin': [2, 64],
                          'grow_policy': ['lossguide'],
                          'tree_method': ['gpu_hist']}
        for param in parameter_combinations(variable_param):
            gpu_results = run_suite(param, select_datasets=datasets)
            assert_results_non_increasing(gpu_results, 1e-2)