Upgrade to CUDA 10.0 (#5649) (#5652)

Co-authored-by: fis <jm.yuan@outlook.com>

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
This commit is contained in:
Jiaming Yuan 2020-05-11 22:27:36 +08:00 committed by GitHub
parent fcf57823b6
commit 9ad40901a8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 27 additions and 21 deletions

11
Jenkinsfile vendored
View File

@ -64,7 +64,6 @@ pipeline {
'build-cpu': { BuildCPU() },
'build-cpu-rabit-mock': { BuildCPUMock() },
'build-cpu-non-omp': { BuildCPUNonOmp() },
'build-gpu-cuda9.0': { BuildCUDA(cuda_version: '9.0') },
'build-gpu-cuda10.0': { BuildCUDA(cuda_version: '10.0') }, 'build-gpu-cuda10.0': { BuildCUDA(cuda_version: '10.0') },
'build-gpu-cuda10.1': { BuildCUDA(cuda_version: '10.1') }, 'build-gpu-cuda10.1': { BuildCUDA(cuda_version: '10.1') },
'build-jvm-packages': { BuildJVMPackages(spark_version: '2.4.3') }, 'build-jvm-packages': { BuildJVMPackages(spark_version: '2.4.3') },
@ -251,10 +250,10 @@ def BuildCUDA(args) {
${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux2010_x86_64
"""
// Stash wheel for CUDA 9.0 target // Stash wheel for CUDA 10.0 target
if (args.cuda_version == '9.0') { if (args.cuda_version == '10.0') {
echo 'Stashing Python wheel...' echo 'Stashing Python wheel...'
stash name: 'xgboost_whl_cuda9', includes: 'python-package/dist/*.whl' stash name: 'xgboost_whl_cuda10', includes: 'python-package/dist/*.whl'
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
echo 'Stashing C++ test executable (testxgboost)...'
@ -298,7 +297,7 @@ def BuildJVMDoc() {
def TestPythonCPU() {
node('linux && cpu') {
unstash name: 'xgboost_whl_cuda9' unstash name: 'xgboost_whl_cuda10'
unstash name: 'srcs'
unstash name: 'xgboost_cli'
echo "Test Python CPU"
@ -315,7 +314,7 @@ def TestPythonCPU() {
def TestPythonGPU(args) {
nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
node(nodeReq) {
unstash name: 'xgboost_whl_cuda9' unstash name: 'xgboost_whl_cuda10'
unstash name: 'srcs'
echo "Test Python GPU: CUDA ${args.cuda_version}"
def container_type = "gpu"

View File

@ -12,25 +12,15 @@ rng = np.random.RandomState(1994)
class TestGPUBasicModels(unittest.TestCase):
cputest = test_bm.TestModels()
def test_eta_decay_gpu_hist(self): def run_cls(self, X, y, deterministic):
self.cputest.run_eta_decay('gpu_hist')
def test_deterministic_gpu_hist(self):
kRows = 1000
kCols = 64
kClasses = 4
# Create large values to force rounding.
X = np.random.randn(kRows, kCols) * 1e4
y = np.random.randint(0, kClasses, size=kRows)
cls = xgb.XGBClassifier(tree_method='gpu_hist', cls = xgb.XGBClassifier(tree_method='gpu_hist',
deterministic_histogram=True, deterministic_histogram=deterministic,
single_precision_histogram=True) single_precision_histogram=True)
cls.fit(X, y)
cls.get_booster().save_model('test_deterministic_gpu_hist-0.json')
cls = xgb.XGBClassifier(tree_method='gpu_hist', cls = xgb.XGBClassifier(tree_method='gpu_hist',
deterministic_histogram=True, deterministic_histogram=deterministic,
single_precision_histogram=True) single_precision_histogram=True)
cls.fit(X, y)
cls.get_booster().save_model('test_deterministic_gpu_hist-1.json')
@ -40,7 +30,24 @@ class TestGPUBasicModels(unittest.TestCase):
with open('test_deterministic_gpu_hist-1.json', 'r') as fd:
model_1 = fd.read()
assert hash(model_0) == hash(model_1)
os.remove('test_deterministic_gpu_hist-0.json')
os.remove('test_deterministic_gpu_hist-1.json')
return hash(model_0), hash(model_1)
def test_eta_decay_gpu_hist(self):
self.cputest.run_eta_decay('gpu_hist')
def test_deterministic_gpu_hist(self):
kRows = 1000
kCols = 64
kClasses = 4
# Create large values to force rounding.
X = np.random.randn(kRows, kCols) * 1e4
y = np.random.randint(0, kClasses, size=kRows) * 1e4
model_0, model_1 = self.run_cls(X, y, True)
assert model_0 == model_1
model_0, model_1 = self.run_cls(X, y, False)
assert model_0 != model_1