diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index 7a8d54d4e..000000000 --- a/Jenkinsfile +++ /dev/null @@ -1,451 +0,0 @@ -#!/usr/bin/groovy -// -*- mode: groovy -*- -// Jenkins pipeline -// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ - -// Command to run command inside a docker container -dockerRun = 'tests/ci_build/ci_build.sh' - -// Which CUDA version to use when building reference distribution wheel -ref_cuda_ver = '11.0.3' - -import groovy.transform.Field - -@Field -def commit_id // necessary to pass a variable from one stage to another - -pipeline { - // Each stage specify its own agent - agent none - - environment { - DOCKER_CACHE_ECR_ID = '492475357299' - DOCKER_CACHE_ECR_REGION = 'us-west-2' - } - - // Setup common job properties - options { - ansiColor('xterm') - timestamps() - timeout(time: 240, unit: 'MINUTES') - buildDiscarder(logRotator(numToKeepStr: '10')) - preserveStashes() - } - - // Build stages - stages { - stage('Jenkins Linux: Initialize') { - agent { label 'job_initializer' } - steps { - script { - def buildNumber = env.BUILD_NUMBER as int - if (buildNumber > 1) milestone(buildNumber - 1) - milestone(buildNumber) - - checkoutSrcs() - commit_id = "${GIT_COMMIT}" - } - sh 'python3 tests/jenkins_get_approval.py' - stash name: 'srcs' - deleteDir() - } - } - stage('Jenkins Linux: Build') { - agent none - steps { - script { - parallel ([ - 'clang-tidy': { ClangTidy() }, - 'build-cpu': { BuildCPU() }, - 'build-cpu-arm64': { BuildCPUARM64() }, - 'build-cpu-rabit-mock': { BuildCPUMock() }, - // Build reference, distribution-ready Python wheel with CUDA 11.0 - // using CentOS 7 image - 'build-gpu-cuda11.0': { BuildCUDA(cuda_version: '11.0.3', build_rmm: true) }, - 'build-gpu-rpkg': { BuildRPackageWithCUDA(cuda_version: '11.0.3') }, - 'build-jvm-packages-gpu-cuda11.0': { BuildJVMPackagesWithCUDA(spark_version: '3.0.1', cuda_version: '11.0.3') }, - 'build-jvm-packages': { BuildJVMPackages(spark_version: '3.0.1') }, - 'build-jvm-doc': { BuildJVMDoc() } - ]) - } - } - } - stage('Jenkins Linux: Test') { - agent none - steps { - script { - parallel ([ - 'test-python-cpu': { TestPythonCPU() }, - 'test-python-cpu-arm64': { TestPythonCPUARM64() }, - // artifact_cuda_version doesn't apply to RMM tests; RMM tests will always match CUDA version between artifact and host env - 'test-python-gpu-cuda11.0': { TestPythonGPU(artifact_cuda_version: '11.0.3', host_cuda_version: '11.0.3', test_rmm: true) }, - 'test-python-mgpu-cuda11.0': { TestPythonGPU(artifact_cuda_version: '11.0.3', host_cuda_version: '11.0.3', multi_gpu: true, test_rmm: true) }, - 'test-cpp-gpu-cuda11.0': { TestCppGPU(artifact_cuda_version: '11.0.3', host_cuda_version: '11.0.3', test_rmm: true) }, - 'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '3.0.0') } - ]) - } - } - } - stage('Jenkins Linux: Deploy') { - agent none - steps { - script { - parallel ([ - 'deploy-jvm-packages': { DeployJVMPackages(spark_version: '3.0.0') } - ]) - } - } - } - } -} - -// check out source code from git -def checkoutSrcs() { - retry(5) { - try { - timeout(time: 2, unit: 'MINUTES') { - checkout scm - sh 'git submodule update --init' - } - } catch (exc) { - deleteDir() - error "Failed to fetch source codes" - } - } -} - -def ClangTidy() { - node('linux && cpu_build') { - unstash name: 'srcs' - echo "Running clang-tidy job..." 
- def container_type = "clang_tidy" - def docker_binary = "docker" - def dockerArgs = "--build-arg CUDA_VERSION_ARG=11.0.3" - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} python3 tests/ci_build/tidy.py --cuda-archs 75 - """ - deleteDir() - } -} - -def BuildCPU() { - node('linux && cpu_build') { - unstash name: 'srcs' - echo "Build CPU" - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} rm -fv dmlc-core/include/dmlc/build_config_default.h - # This step is not necessary, but here we include it, to ensure that DMLC_CORE_USE_CMAKE flag is correctly propagated - # We want to make sure that we use the configured header build/dmlc/build_config.h instead of include/dmlc/build_config_default.h. - # See discussion at https://github.com/dmlc/xgboost/issues/5510 - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DPLUGIN_DENSE_PARSER=ON - ${dockerRun} ${container_type} ${docker_binary} bash -c "cd build && ctest --extra-verbose" - """ - // Sanitizer test - def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer -e ASAN_OPTIONS=symbolize=1 -e UBSAN_OPTIONS=print_stacktrace=1:log_path=ubsan_error.log --cap-add SYS_PTRACE'" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \ - -DCMAKE_BUILD_TYPE=Debug -DSANITIZER_PATH=/usr/lib/x86_64-linux-gnu/ - ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} bash -c "cd build && ctest --exclude-regex AllTestsInDMLCUnitTests --extra-verbose" - """ - - stash name: 'xgboost_cli', includes: 'xgboost' - deleteDir() - } -} - -def BuildCPUARM64() { - node('linux && arm64') { - unstash name: 'srcs' - echo "Build CPU ARM64" - def container_type = "aarch64" - def docker_binary = "docker" - def wheel_tag = "manylinux2014_aarch64" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh --conda-env=aarch64_test -DOPEN_MP:BOOL=ON -DHIDE_CXX_SYMBOL=ON - ${dockerRun} ${container_type} ${docker_binary} bash -c "cd build && ctest --extra-verbose" - ${dockerRun} ${container_type} ${docker_binary} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal" - ${dockerRun} ${container_type} ${docker_binary} python tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} ${wheel_tag} - ${dockerRun} ${container_type} ${docker_binary} bash -c "auditwheel repair --plat ${wheel_tag} python-package/dist/*.whl && python tests/ci_build/rename_whl.py wheelhouse/*.whl ${commit_id} ${wheel_tag}" - mv -v wheelhouse/*.whl python-package/dist/ - # Make sure that libgomp.so is vendored in the wheel - ${dockerRun} ${container_type} ${docker_binary} bash -c "unzip -l python-package/dist/*.whl | grep libgomp || exit -1" - """ - echo 'Stashing Python wheel...' - stash name: "xgboost_whl_arm64_cpu", includes: 'python-package/dist/*.whl' - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - echo 'Uploading Python wheel...' 
- sh """ - python3 -m awscli s3 cp python-package/dist/*.whl s3://xgboost-nightly-builds/${BRANCH_NAME}/ --acl public-read --no-progress - """ - } - stash name: 'xgboost_cli_arm64', includes: 'xgboost' - deleteDir() - } -} - -def BuildCPUMock() { - node('linux && cpu_build') { - unstash name: 'srcs' - echo "Build CPU with rabit mock" - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_mock_cmake.sh - """ - echo 'Stashing rabit C++ test executable (xgboost)...' - stash name: 'xgboost_rabit_tests', includes: 'xgboost' - deleteDir() - } -} - -def BuildCUDA(args) { - node('linux && cpu_build') { - unstash name: 'srcs' - echo "Build with CUDA ${args.cuda_version}" - def container_type = "gpu_build_centos7" - def docker_binary = "docker" - def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}" - def arch_flag = "" - if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) { - arch_flag = "-DGPU_COMPUTE_VER=75" - } - def wheel_tag = "manylinux2014_x86_64" - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/prune_libnccl.sh - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DUSE_OPENMP=ON -DHIDE_CXX_SYMBOLS=ON -DUSE_NCCL_LIB_PATH=ON -DNCCL_INCLUDE_DIR=/usr/include -DNCCL_LIBRARY=/workspace/libnccl_static.a ${arch_flag} - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal" - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} ${wheel_tag} - """ - if (args.cuda_version == ref_cuda_ver) { - sh """ - ${dockerRun} auditwheel_x86_64 ${docker_binary} auditwheel repair --plat ${wheel_tag} python-package/dist/*.whl - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python tests/ci_build/rename_whl.py wheelhouse/*.whl ${commit_id} ${wheel_tag} - mv -v wheelhouse/*.whl python-package/dist/ - # Make sure that libgomp.so is vendored in the wheel - ${dockerRun} auditwheel_x86_64 ${docker_binary} bash -c "unzip -l python-package/dist/*.whl | grep libgomp || exit -1" - """ - } - echo 'Stashing Python wheel...' - stash name: "xgboost_whl_cuda${args.cuda_version}", includes: 'python-package/dist/*.whl' - if (args.cuda_version == ref_cuda_ver && (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release'))) { - echo 'Uploading Python wheel...' - sh """ - python3 -m awscli s3 cp python-package/dist/*.whl s3://xgboost-nightly-builds/${BRANCH_NAME}/ --acl public-read --no-progress - """ - } - echo 'Stashing C++ test executable (testxgboost)...' 
- stash name: "xgboost_cpp_tests_cuda${args.cuda_version}", includes: 'build/testxgboost' - if (args.build_rmm) { - echo "Build with CUDA ${args.cuda_version} and RMM" - container_type = "rmm" - docker_binary = "docker" - docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}" - sh """ - rm -rf build/ - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh --conda-env=gpu_test -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON -DBUILD_WITH_CUDA_CUB=ON ${arch_flag} - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal" - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux2014_x86_64 - """ - echo 'Stashing Python wheel...' - stash name: "xgboost_whl_rmm_cuda${args.cuda_version}", includes: 'python-package/dist/*.whl' - echo 'Stashing C++ test executable (testxgboost)...' - stash name: "xgboost_cpp_tests_rmm_cuda${args.cuda_version}", includes: 'build/testxgboost' - } - deleteDir() - } -} - -def BuildRPackageWithCUDA(args) { - node('linux && cpu_build') { - unstash name: 'srcs' - def container_type = 'gpu_build_r_centos7' - def docker_binary = "docker" - def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}" - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_r_pkg_with_cuda.sh ${commit_id} - """ - echo 'Uploading R tarball...' - sh """ - python3 -m awscli s3 cp xgboost_r_gpu_linux_*.tar.gz s3://xgboost-nightly-builds/${BRANCH_NAME}/ --acl public-read --no-progress - """ - } - deleteDir() - } -} - -def BuildJVMPackagesWithCUDA(args) { - node('linux && mgpu') { - unstash name: 'srcs' - echo "Build XGBoost4J-Spark with Spark ${args.spark_version}, CUDA ${args.cuda_version}" - def container_type = "jvm_gpu_build" - def docker_binary = "nvidia-docker" - def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}" - def arch_flag = "" - if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) { - arch_flag = "-DGPU_COMPUTE_VER=75" - } - // Use only 4 CPU cores - def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='--cpuset-cpus 0-3'" - sh """ - ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_jvm_packages.sh ${args.spark_version} -Duse.cuda=ON $arch_flag - """ - echo "Stashing XGBoost4J JAR with CUDA ${args.cuda_version} ..." - stash name: 'xgboost4j_jar_gpu', includes: "jvm-packages/xgboost4j-gpu/target/*.jar,jvm-packages/xgboost4j-spark-gpu/target/*.jar" - deleteDir() - } -} - -def BuildJVMPackages(args) { - node('linux && cpu') { - unstash name: 'srcs' - echo "Build XGBoost4J-Spark with Spark ${args.spark_version}" - def container_type = "jvm" - def docker_binary = "docker" - // Use only 4 CPU cores - def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='--cpuset-cpus 0-3'" - sh """ - ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_packages.sh ${args.spark_version} - """ - echo 'Stashing XGBoost4J JAR...' - stash name: 'xgboost4j_jar', includes: "jvm-packages/xgboost4j/target/*.jar,jvm-packages/xgboost4j-spark/target/*.jar,jvm-packages/xgboost4j-example/target/*.jar" - deleteDir() - } -} - -def BuildJVMDoc() { - node('linux && cpu') { - unstash name: 'srcs' - echo "Building JVM doc..." 
- def container_type = "jvm" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_doc.sh ${BRANCH_NAME} - """ - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - echo 'Uploading doc...' - sh """ - python3 -m awscli s3 cp jvm-packages/${BRANCH_NAME}.tar.bz2 s3://xgboost-docs/${BRANCH_NAME}.tar.bz2 --acl public-read --no-progress - """ - } - deleteDir() - } -} - -def TestPythonCPU() { - node('linux && cpu') { - unstash name: "xgboost_whl_cuda${ref_cuda_ver}" - unstash name: 'srcs' - unstash name: 'xgboost_cli' - echo "Test Python CPU" - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu - """ - deleteDir() - } -} - -def TestPythonCPUARM64() { - node('linux && arm64') { - unstash name: "xgboost_whl_arm64_cpu" - unstash name: 'srcs' - unstash name: 'xgboost_cli_arm64' - echo "Test Python CPU ARM64" - def container_type = "aarch64" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu-arm64 - """ - deleteDir() - } -} - -def TestPythonGPU(args) { - def nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu' - def artifact_cuda_version = (args.artifact_cuda_version) ?: ref_cuda_ver - node(nodeReq) { - unstash name: "xgboost_whl_cuda${artifact_cuda_version}" - unstash name: "xgboost_cpp_tests_cuda${artifact_cuda_version}" - unstash name: 'srcs' - echo "Test Python GPU: CUDA ${args.host_cuda_version}" - def container_type = "gpu" - def docker_binary = "nvidia-docker" - def docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}" - def mgpu_indicator = (args.multi_gpu) ? 'mgpu' : 'gpu' - // Allocate extra space in /dev/shm to enable NCCL - def docker_extra_params = (args.multi_gpu) ? 
"CI_DOCKER_EXTRA_PARAMS_INIT='--shm-size=4g'" : '' - sh "${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh ${mgpu_indicator}" - if (args.test_rmm) { - sh "rm -rfv build/ python-package/dist/" - unstash name: "xgboost_whl_rmm_cuda${args.host_cuda_version}" - unstash name: "xgboost_cpp_tests_rmm_cuda${args.host_cuda_version}" - sh "${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh ${mgpu_indicator} --use-rmm-pool" - } - deleteDir() - } -} - -def TestCppGPU(args) { - def nodeReq = 'linux && mgpu' - def artifact_cuda_version = (args.artifact_cuda_version) ?: ref_cuda_ver - node(nodeReq) { - unstash name: "xgboost_cpp_tests_cuda${artifact_cuda_version}" - unstash name: 'srcs' - echo "Test C++, CUDA ${args.host_cuda_version}, rmm: ${args.test_rmm}" - def container_type = "gpu" - def docker_binary = "nvidia-docker" - def docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}" - sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost" - if (args.test_rmm) { - sh "rm -rfv build/" - unstash name: "xgboost_cpp_tests_rmm_cuda${args.host_cuda_version}" - echo "Test C++, CUDA ${args.host_cuda_version} with RMM" - container_type = "rmm" - docker_binary = "nvidia-docker" - docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}" - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "source activate gpu_test && build/testxgboost --use-rmm-pool" - """ - } - deleteDir() - } -} - -def CrossTestJVMwithJDK(args) { - node('linux && cpu') { - unstash name: 'xgboost4j_jar' - unstash name: 'srcs' - if (args.spark_version != null) { - echo "Test XGBoost4J on a machine with JDK ${args.jdk_version}, Spark ${args.spark_version}" - } else { - echo "Test XGBoost4J on a machine with JDK ${args.jdk_version}" - } - def container_type = "jvm_cross" - def docker_binary = "docker" - def spark_arg = (args.spark_version != null) ? "--build-arg SPARK_VERSION=${args.spark_version}" : "" - def docker_args = "--build-arg JDK_VERSION=${args.jdk_version} ${spark_arg}" - // Run integration tests only when spark_version is given - def docker_extra_params = (args.spark_version != null) ? "CI_DOCKER_EXTRA_PARAMS_INIT='-e RUN_INTEGRATION_TEST=1'" : "" - sh """ - ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_jvm_cross.sh - """ - deleteDir() - } -} - -def DeployJVMPackages(args) { - node('linux && cpu') { - unstash name: 'srcs' - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - echo 'Deploying to xgboost-maven-repo S3 repo...' 
- sh """ - ${dockerRun} jvm_gpu_build docker --build-arg CUDA_VERSION_ARG=11.0.3 tests/ci_build/deploy_jvm_packages.sh ${args.spark_version} - """ - } - deleteDir() - } -} diff --git a/Jenkinsfile-win64 b/Jenkinsfile-win64 deleted file mode 100644 index 38841bcdf..000000000 --- a/Jenkinsfile-win64 +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/groovy -// -*- mode: groovy -*- - -/* Jenkins pipeline for Windows AMD64 target */ - -import groovy.transform.Field - -@Field -def commit_id // necessary to pass a variable from one stage to another - -pipeline { - agent none - - // Setup common job properties - options { - timestamps() - timeout(time: 240, unit: 'MINUTES') - buildDiscarder(logRotator(numToKeepStr: '10')) - preserveStashes() - } - - // Build stages - stages { - stage('Jenkins Win64: Initialize') { - agent { label 'job_initializer' } - steps { - script { - def buildNumber = env.BUILD_NUMBER as int - if (buildNumber > 1) milestone(buildNumber - 1) - milestone(buildNumber) - checkoutSrcs() - commit_id = "${GIT_COMMIT}" - } - sh 'python3 tests/jenkins_get_approval.py' - stash name: 'srcs' - deleteDir() - } - } - stage('Jenkins Win64: Build') { - agent none - steps { - script { - parallel ([ - 'build-win64-cuda11.0': { BuildWin64() }, - 'build-rpkg-win64-cuda11.0': { BuildRPackageWithCUDAWin64() } - ]) - } - } - } - stage('Jenkins Win64: Test') { - agent none - steps { - script { - parallel ([ - 'test-win64-cuda11.0': { TestWin64() }, - ]) - } - } - } - } -} - -// check out source code from git -def checkoutSrcs() { - retry(5) { - try { - timeout(time: 2, unit: 'MINUTES') { - checkout scm - sh 'git submodule update --init' - } - } catch (exc) { - deleteDir() - error "Failed to fetch source codes" - } - } -} - -def BuildWin64() { - node('win64 && cuda11_unified') { - deleteDir() - unstash name: 'srcs' - echo "Building XGBoost for Windows AMD64 target..." - bat "nvcc --version" - def arch_flag = "" - if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) { - arch_flag = "-DGPU_COMPUTE_VER=75" - } - bat """ - mkdir build - cd build - cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON ${arch_flag} -DCMAKE_UNITY_BUILD=ON - """ - bat """ - cd build - "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\MSBuild\\15.0\\Bin\\MSBuild.exe" xgboost.sln /m /p:Configuration=Release /nodeReuse:false - """ - bat """ - cd python-package - conda activate && python setup.py bdist_wheel --universal && for /R %%i in (dist\\*.whl) DO python ../tests/ci_build/rename_whl.py "%%i" ${commit_id} win_amd64 - """ - echo "Insert vcomp140.dll (OpenMP runtime) into the wheel..." - bat """ - cd python-package\\dist - COPY /B ..\\..\\tests\\ci_build\\insert_vcomp140.py - conda activate && python insert_vcomp140.py *.whl - """ - echo 'Stashing Python wheel...' - stash name: 'xgboost_whl', includes: 'python-package/dist/*.whl' - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - echo 'Uploading Python wheel...' - bat """ - cd python-package - conda activate && for /R %%i in (dist\\*.whl) DO python -m awscli s3 cp "%%i" s3://xgboost-nightly-builds/${BRANCH_NAME}/ --acl public-read --no-progress - """ - } - echo 'Stashing C++ test executable (testxgboost)...' 
- stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost.exe' - stash name: 'xgboost_cli', includes: 'xgboost.exe' - deleteDir() - } -} - -def BuildRPackageWithCUDAWin64() { - node('win64 && cuda11_unified') { - deleteDir() - unstash name: 'srcs' - bat "nvcc --version" - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - bat """ - bash tests/ci_build/build_r_pkg_with_cuda_win64.sh ${commit_id} - """ - echo 'Uploading R tarball...' - bat """ - conda activate && for /R %%i in (xgboost_r_gpu_win64_*.tar.gz) DO python -m awscli s3 cp "%%i" s3://xgboost-nightly-builds/${BRANCH_NAME}/ --acl public-read --no-progress - """ - } - deleteDir() - } -} - -def TestWin64() { - node('win64 && cuda11_unified') { - deleteDir() - unstash name: 'srcs' - unstash name: 'xgboost_whl' - unstash name: 'xgboost_cli' - unstash name: 'xgboost_cpp_tests' - echo "Test Win64" - bat "nvcc --version" - echo "Running C++ tests..." - bat "build\\testxgboost.exe" - echo "Installing Python dependencies..." - def env_name = 'win64_' + UUID.randomUUID().toString().replaceAll('-', '') - bat "conda activate && mamba env create -n ${env_name} --file=tests/ci_build/conda_env/win64_test.yml" - echo "Installing Python wheel..." - bat """ - conda activate ${env_name} && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i" - """ - echo "Running Python tests..." - bat "conda activate ${env_name} && python -X faulthandler -m pytest -v -s -rxXs --fulltrace tests\\python" - bat """ - conda activate ${env_name} && python -X faulthandler -m pytest -v -s -rxXs --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu - """ - bat "conda env remove --name ${env_name}" - deleteDir() - } -} diff --git a/doc/contrib/donate.rst b/doc/contrib/donate.rst index 6571fef5f..cc373d2b8 100644 --- a/doc/contrib/donate.rst +++ b/doc/contrib/donate.rst @@ -13,9 +13,9 @@ DMLC/XGBoost has grown from a research project incubated in academia to one of t A robust and efficient **continuous integration (CI)** infrastructure is one of the most critical solutions to address the above challenge. A CI service will monitor an open-source repository and run a suite of integration tests for every incoming contribution. This way, the CI ensures that every proposed change in the codebase is compatible with existing functionalities. Furthermore, XGBoost can enable more thorough tests with a powerful CI infrastructure to cover cases which are closer to the production environment. -There are several CI services available free to open source projects, such as Travis CI and AppVeyor. The XGBoost project already utilizes Travis and AppVeyor. However, the XGBoost project has needs that these free services do not adequately address. In particular, the limited usage quota of resources such as CPU and memory leaves XGBoost developers unable to bring "too-intensive" tests. In addition, they do not offer test machines with GPUs for testing XGBoost-GPU code base which has been attracting more and more interest across many organizations. Consequently, the XGBoost project self-hosts a cloud server with Jenkins software installed: https://xgboost-ci.net/. +There are several CI services available free to open source projects, such as Travis CI and AppVeyor. The XGBoost project already utilizes GitHub Actions. However, the XGBoost project has needs that these free services do not adequately address. 
In particular, the limited usage quota of resources such as CPU and memory leaves XGBoost developers unable to bring "too-intensive" tests. In addition, they do not offer test machines with GPUs for testing XGBoost-GPU code base which has been attracting more and more interest across many organizations. Consequently, the XGBoost project uses a cloud-hosted test farm. We use `BuildKite `_ to organize CI pipelines. -The self-hosted Jenkins CI server has recurring operating expenses. It utilizes a leading cloud provider (AWS) to accommodate variable workload. The master node serving the web interface is available 24/7, to accommodate contributions from people around the globe. In addition, the master node launches slave nodes on demand, to run the test suite on incoming contributions. To save cost, the slave nodes are terminated when they are no longer needed. +The cloud-hosted test farm has recurring operating expenses. It utilizes a leading cloud provider (AWS) to accommodate variable workload. BuildKite launches worker machines on AWS on demand, to run the test suite on incoming contributions. To save cost, the worker machines are terminated when they are no longer needed. To help defray the hosting cost, the XGBoost project seeks donations from third parties. @@ -29,14 +29,14 @@ The Project Management Committee (PMC) of the XGBoost project appointed `Open So All expenses incurred for hosting CI will be submitted to the fiscal host with receipts. Only the expenses in the following categories will be approved for reimbursement: -* Cloud exprenses for the Jenkins CI server (https://xgboost-ci.net) +* Cloud expenses for the cloud test farm (https://buildkite.com/xgboost) * Cost of domain https://xgboost-ci.net -* Meetup.com account for XGBoost project +* Monthly cost of using BuildKite * Hosting cost of the User Forum (https://discuss.xgboost.ai) -Administration of Jenkins CI server ------------------------------------ -The PMC shall appoint committer(s) to administer the Jenkins CI server on their behalf. The current administrators are as follows: +Administration of cloud CI infrastructure +----------------------------------------- +The PMC shall appoint committer(s) to administer the cloud CI infrastructure on their behalf. The current administrators are as follows: * Primary administrator: `Hyunsu Cho `_ * Secondary administrator: `Jiaming Yuan `_ diff --git a/tests/jenkins_get_approval.py b/tests/jenkins_get_approval.py deleted file mode 100644 index 4a68722d9..000000000 --- a/tests/jenkins_get_approval.py +++ /dev/null @@ -1,26 +0,0 @@ -import boto3 -import json - -lambda_client = boto3.client('lambda', region_name='us-west-2') - -# Source code for the Lambda function is available at https://github.com/hcho3/xgboost-devops -r = lambda_client.invoke( - FunctionName='XGBoostCICostWatcher', - InvocationType='RequestResponse', - Payload='{}'.encode('utf-8') -) - -payload = r['Payload'].read().decode('utf-8') -if 'FunctionError' in r: - msg = 'Error when invoking the Lambda function. Stack trace:\n' - error = json.loads(payload) - msg += f" {error['errorType']}: {error['errorMessage']}\n" - for trace in error['stackTrace']: - for line in trace.split('\n'): - msg += f' {line}\n' - raise RuntimeError(msg) -response = json.loads(payload) -if response['approved']: - print(f"Testing approved. Reason: {response['reason']}") -else: - raise RuntimeError(f"Testing rejected.
Reason: {response['reason']}") diff --git a/tests/travis/run_test.sh b/tests/travis/run_test.sh deleted file mode 100755 index 4baf983e5..000000000 --- a/tests/travis/run_test.sh +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/bash - -source $HOME/miniconda/bin/activate - -if [ ${TASK} == "python_sdist_test" ]; then - set -e - - conda activate python3 - python --version - cmake --version - - make pippack - python -m pip install xgboost-*.tar.gz -v --user - python -c 'import xgboost' || exit -1 -fi - -if [ ${TASK} == "python_test" ]; then - if grep -n -R '<<<.*>>>\(.*\)' src include | grep --invert "NOLINT"; then - echo 'Do not use raw CUDA execution configuration syntax with <<>>.' \ - 'try `dh::LaunchKernel`' - exit -1 - fi - - set -e - - - # Build binary wheel - if [ ${TRAVIS_CPU_ARCH} == "arm64" ]; then - # Build manylinux2014 wheel on ARM64 - tests/ci_build/ci_build.sh aarch64 docker tests/ci_build/build_via_cmake.sh --conda-env=aarch64_test - tests/ci_build/ci_build.sh aarch64 docker bash -c "cd build && ctest --extra-verbose" - tests/ci_build/ci_build.sh aarch64 docker bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal" - TAG=manylinux2014_aarch64 - tests/ci_build/ci_build.sh aarch64 docker python tests/ci_build/rename_whl.py python-package/dist/*.whl ${TRAVIS_COMMIT} ${TAG} - tests/ci_build/ci_build.sh aarch64 docker auditwheel repair --plat ${TAG} python-package/dist/*.whl - mv -v wheelhouse/*.whl python-package/dist/ - # Make sure that libgomp.so is vendored in the wheel - unzip -l python-package/dist/*.whl | grep libgomp || exit -1 - else - rm -rf build - mkdir build && cd build - conda activate python3 - cmake --version - cmake .. -DUSE_OPENMP=ON -DCMAKE_VERBOSE_MAKEFILE=ON - make -j$(nproc) - cd ../python-package - python setup.py bdist_wheel - cd .. 
- TAG=macosx_10_14_x86_64.macosx_10_15_x86_64.macosx_11_0_x86_64 - python tests/ci_build/rename_whl.py python-package/dist/*.whl ${TRAVIS_COMMIT} ${TAG} - fi - - # Run unit tests - echo "------------------------------" - if [ ${TRAVIS_CPU_ARCH} == "arm64" ]; then - tests/ci_build/ci_build.sh aarch64 docker \ - bash -c "source activate aarch64_test && python -m pip install ./python-package/dist/xgboost-*-py3-none-${TAG}.whl && python -m pytest -v -s -rxXs --durations=0 --fulltrace tests/python/test_basic.py tests/python/test_basic_models.py tests/python/test_model_compatibility.py --cov=python-package/xgboost" - else - conda env create -n cpu_test --file=tests/ci_build/conda_env/macos_cpu_test.yml - conda activate cpu_test - python -m pip install ./python-package/dist/xgboost-*-py3-none-${TAG}.whl - conda --version - python --version - python -m pytest -v -s -rxXs --durations=0 --fulltrace tests/python --cov=python-package/xgboost || exit -1 - fi - conda activate python3 - codecov - - # Deploy binary wheel to S3 - if [ "${TRAVIS_PULL_REQUEST}" != "false" ] - then - S3_DEST="s3://xgboost-nightly-builds/PR-${TRAVIS_PULL_REQUEST}/" - else - if [ "${TRAVIS_BRANCH}" == "master" ] - then - S3_DEST="s3://xgboost-nightly-builds/" - elif [ -z "${TRAVIS_TAG}" ] - then - S3_DEST="s3://xgboost-nightly-builds/${TRAVIS_BRANCH}/" - fi - fi - python -m awscli s3 cp python-package/dist/*.whl "${S3_DEST}" --acl public-read || true -fi - -if [ ${TASK} == "java_test" ]; then - export RABIT_MOCK=ON - conda activate python3 - cd jvm-packages - mvn -q clean install -DskipTests -Dmaven.test.skip - mvn -q test -fi - -if [ ${TASK} == "s390x_test" ]; then - set -e - python3 -m pip install --user pytest hypothesis cmake - - # Build and run C++ tests - rm -rf build - mkdir build && cd build - cmake .. -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_OPENMP=ON -DUSE_DMLC_GTEST=ON -GNinja - time ninja -v - ./testxgboost - - # Run model compatibility tests - cd .. 
- PYTHONPATH=./python-package python3 -m pytest --fulltrace -v -rxXs tests/python/test_basic.py -fi diff --git a/tests/travis/setup.sh b/tests/travis/setup.sh deleted file mode 100755 index 405266e17..000000000 --- a/tests/travis/setup.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -if [ ${TASK} == "python_test" ] || [ ${TASK} == "python_sdist_test" ]; then - if [ ${TRAVIS_OS_NAME} == "osx" ]; then - wget --no-verbose -O conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - elif [ ${TRAVIS_CPU_ARCH} == "arm64" ]; then - wget --no-verbose -O conda.sh https://github.com/conda-forge/miniforge/releases/download/4.8.2-1/Miniforge3-4.8.2-1-Linux-aarch64.sh - else - wget --no-verbose -O conda.sh https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh - fi - bash conda.sh -b -p $HOME/miniconda - source $HOME/miniconda/bin/activate - hash -r - conda config --set always_yes yes --set changeps1 no - conda update -q conda - # Useful for debugging any issues with conda - conda info -a - conda create -n python3 python=3.7 cmake numpy scipy codecov - conda activate python3 - python -m pip install awscli -fi - -if [ ${TASK} == "s390x_test" ] && [ ${TRAVIS_CPU_ARCH} == "s390x" ]; then - sudo apt-get update - sudo apt-get install -y --no-install-recommends tar unzip wget git build-essential ninja-build \ - time python3 python3-pip python3-numpy python3-scipy python3-sklearn r-base -fi diff --git a/tests/travis/travis_after_failure.sh b/tests/travis/travis_after_failure.sh deleted file mode 100755 index 553cc979e..000000000 --- a/tests/travis/travis_after_failure.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -if [ ${TASK} == "r_test" ]; then - cat xgboost/xgboost.Rcheck/*.log - echo "--------------------------" - cat xgboost/xgboost.Rcheck/*.out -fi diff --git a/tests/travis/travis_before_cache.sh b/tests/travis/travis_before_cache.sh deleted file mode 100755 index 6789ae08e..000000000 --- a/tests/travis/travis_before_cache.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -# do nothing for now -ls -alLR ${CACHE_PREFIX} \ No newline at end of file diff --git a/tests/travis/travis_setup_env.sh b/tests/travis/travis_setup_env.sh deleted file mode 100644 index 7f4af313e..000000000 --- a/tests/travis/travis_setup_env.sh +++ /dev/null @@ -1,40 +0,0 @@ -# script to be sourced in travis yml -# setup all enviroment variables - -export CACHE_PREFIX=${HOME}/.cache/usr -export PATH=${HOME}/.local/bin:${PATH} -export PATH=${PATH}:${CACHE_PREFIX}/bin -export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:${CACHE_PREFIX}/include -export C_INCLUDE_PATH=${C_INCLUDE_PATH}:${CACHE_PREFIX}/include -export LIBRARY_PATH=${LIBRARY_PATH}:${CACHE_PREFIX}/lib -export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CACHE_PREFIX}/lib -export DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}:${CACHE_PREFIX}/lib - -alias make="make -j4" - -# setup the cache prefix folder -if [ ! -d ${HOME}/.cache ]; then - mkdir ${HOME}/.cache -fi - -if [ ! -d ${CACHE_PREFIX} ]; then - mkdir ${CACHE_PREFIX} -fi -if [ ! -d ${CACHE_PREFIX}/include ]; then - mkdir ${CACHE_PREFIX}/include -fi -if [ ! -d ${CACHE_PREFIX}/lib ]; then - mkdir ${CACHE_PREFIX}/lib -fi -if [ ! -d ${CACHE_PREFIX}/bin ]; then - mkdir ${CACHE_PREFIX}/bin -fi - -# setup CUDA path if NVCC_PREFIX exists -if [ ! 
-z "$NVCC_PREFIX" ]; then - export PATH=${PATH}:${NVCC_PREFIX}/usr/local/cuda-7.5/bin - export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:${NVCC_PREFIX}/usr/local/cuda-7.5/include - export C_INCLUDE_PATH=${C_INCLUDE_PATH}:${NVCC_PREFIX}/usr/local/cuda-7.5/include - export LIBRARY_PATH=${LIBRARY_PATH}:${NVCC_PREFIX}/usr/local/cuda-7.5/lib64:${NVCC_PREFIX}/usr/lib/x86_64-linux-gnu - export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${NVCC_PREFIX}/usr/local/cuda-7.5/lib64:${NVCC_PREFIX}/usr/lib/x86_64-linux-gnu -fi