[CI] Upload nightly builds to S3 (#4976)

* Do not store built artifacts in the Jenkins master

* Add wheel renaming script

* Upload wheels to S3 bucket

* Use env.GIT_COMMIT

* Capture git hash correctly

* Add missing import in Jenkinsfile

* Address reviewer's comments

* Put artifacts for pull requests in separate directory (a rough sketch of the resulting layout follows this list)

* No wildcard expansion in Windows CMD
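
Taken together, the changes rename each built wheel so its filename carries the commit hash, then upload it to the 'xgboost-nightly-builds' S3 bucket, with master builds at the bucket root and other branches (e.g. pull-request branches) under their own prefix. A rough sketch of that layout in Python; the helper below is hypothetical, while the bucket name, filename pattern, and branch logic are taken from the diff that follows:

def nightly_s3_key(wheel_basename, commit_id, platform_tag, branch_name):
    """Return the S3 key a renamed wheel would be uploaded under (sketch only)."""
    pkg_name, version = wheel_basename.split('-')[:2]
    new_name = '{}-{}+{}-py2.py3-none-{}.whl'.format(
        pkg_name, version, commit_id, platform_tag)
    # master builds land at the bucket root; any other branch gets its own
    # directory, mirroring the ternary on BRANCH_NAME in the Jenkinsfiles below
    prefix = '' if branch_name == 'master' else branch_name + '/'
    return prefix + new_name

# Hypothetical example:
#   nightly_s3_key('xgboost-1.0.0-py2.py3-none-any.whl', 'da6e74f7bb',
#                  'manylinux1_x86_64', 'master')
#   -> 'xgboost-1.0.0+da6e74f7bb-py2.py3-none-manylinux1_x86_64.whl'
# uploaded to the 'xgboost-nightly-builds' bucket with a PublicRead ACL.
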
Philip Hyunsu Cho 2019-10-23 21:16:05 -07:00 committed by GitHub
parent ac457c56a2
commit da6e74f7bb
4 changed files with 56 additions and 5 deletions

Jenkinsfile

@@ -6,6 +6,11 @@
// Command to run command inside a docker container
dockerRun = 'tests/ci_build/ci_build.sh'
+import groovy.transform.Field
+@Field
+def commit_id // necessary to pass a variable from one stage to another
pipeline {
// Each stage specify its own agent
agent none
@@ -31,6 +36,7 @@ pipeline {
steps {
script {
checkoutSrcs()
+commit_id = "${GIT_COMMIT}"
}
stash name: 'srcs'
milestone ordinal: 1
@@ -158,7 +164,6 @@ def Doxygen() {
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/doxygen.sh ${BRANCH_NAME}
"""
-archiveArtifacts artifacts: "build/${BRANCH_NAME}.tar.bz2", allowEmptyArchive: true
echo 'Uploading doc...'
s3Upload file: "build/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "doxygen/${BRANCH_NAME}.tar.bz2"
deleteDir()
@@ -213,12 +218,14 @@ def BuildCUDA(args) {
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON
${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
+${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux1_x86_64
"""
// Stash wheel for CUDA 9.0 target
if (args.cuda_version == '9.0') {
echo 'Stashing Python wheel...'
stash name: 'xgboost_whl_cuda9', includes: 'python-package/dist/*.whl'
-archiveArtifacts artifacts: "python-package/dist/*.whl", allowEmptyArchive: true
+path = ("${BRANCH_NAME}" == 'master') ? '/' : "${BRANCH_NAME}/"
+s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
echo 'Stashing C++ test executable (testxgboost)...'
stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost'
}
@@ -252,7 +259,6 @@ def BuildJVMDoc() {
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_doc.sh ${BRANCH_NAME}
"""
-archiveArtifacts artifacts: "jvm-packages/${BRANCH_NAME}.tar.bz2", allowEmptyArchive: true
echo 'Uploading doc...'
s3Upload file: "jvm-packages/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${BRANCH_NAME}.tar.bz2"
deleteDir()


@@ -3,6 +3,11 @@
/* Jenkins pipeline for Windows AMD64 target */
+import groovy.transform.Field
+@Field
+def commit_id // necessary to pass a variable from one stage to another
pipeline {
agent none
// Build stages
@@ -12,6 +17,7 @@ pipeline {
steps {
script {
checkoutSrcs()
+commit_id = "${GIT_COMMIT}"
}
stash name: 'srcs'
milestone ordinal: 1
@@ -76,7 +82,7 @@ def BuildWin64() {
"""
bat """
cd python-package
-conda activate && python setup.py bdist_wheel --universal
+conda activate && python setup.py bdist_wheel --universal && for /R %%i in (dist\\*.whl) DO python ../tests/ci_build/rename_whl.py "%%i" ${commit_id} win_amd64
"""
echo "Insert vcomp140.dll (OpenMP runtime) into the wheel..."
bat """
@@ -86,7 +92,8 @@ def BuildWin64() {
"""
echo 'Stashing Python wheel...'
stash name: 'xgboost_whl', includes: 'python-package/dist/*.whl'
-archiveArtifacts artifacts: "python-package/dist/*.whl", allowEmptyArchive: true
+path = ("${BRANCH_NAME}" == 'master') ? '/' : "${BRANCH_NAME}/"
+s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
echo 'Stashing C++ test executable (testxgboost)...'
stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost.exe'
deleteDir()


@@ -15,6 +15,7 @@ Installation Guide
* The binary wheel will support GPU algorithms (`gpu_hist`) on machines with NVIDIA GPUs. Please note that **training with multiple GPUs is only supported for Linux platform**. See :doc:`gpu/index`.
* Currently, we provide binary wheels for 64-bit Linux and Windows.
+* Nightly builds are available. You can now run *pip install https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/xgboost-[version]+[commit hash]-py2.py3-none-manylinux1_x86_64.whl* to install the nightly build with the given commit hash. See `this page <https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/list.html>`_ to see the list of all nightly builds.
****************************
Building XGBoost from source
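
For users, the documentation line added above means a specific nightly build can be installed by filling the commit hash into the URL pattern; a minimal illustration in Python (the version and commit hash are hypothetical placeholders):

url = ('https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/'
       'xgboost-{version}+{commit}-py2.py3-none-manylinux1_x86_64.whl'
       .format(version='1.0.0', commit='da6e74f7bb'))
print(url)
# then run: pip install <url>
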

tests/ci_build/rename_whl.py

@@ -0,0 +1,37 @@
import sys
import os
from contextlib import contextmanager

@contextmanager
def cd(path):
    path = os.path.normpath(path)
    cwd = os.getcwd()
    os.chdir(path)
    print("cd " + path)
    try:
        yield path
    finally:
        os.chdir(cwd)

if len(sys.argv) != 4:
    print('Usage: {} [wheel to rename] [commit id] [platform tag]'.format(sys.argv[0]))
    sys.exit(1)

whl_path = sys.argv[1]
commit_id = sys.argv[2]
platform_tag = sys.argv[3]
assert platform_tag in ['manylinux1_x86_64', 'win_amd64']

dirname, basename = os.path.dirname(whl_path), os.path.basename(whl_path)

with cd(dirname):
    tokens = basename.split('-')
    assert len(tokens) == 5
    keywords = {'pkg_name': tokens[0],
                'version': tokens[1],
                'commit_id': commit_id,
                'platform_tag': platform_tag}
    new_name = '{pkg_name}-{version}+{commit_id}-py2.py3-none-{platform_tag}.whl'.format(**keywords)
    print('Renaming {} to {}...'.format(basename, new_name))
    os.rename(basename, new_name)
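
As a usage illustration: the Linux pipeline above invokes this script as 'python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux1_x86_64', while the Windows pipeline loops over dist\*.whl with for /R because CMD does not expand wildcards. The effect on a wheel (the version below is a hypothetical placeholder):

# before: xgboost-1.0.0-py2.py3-none-any.whl
# after:  xgboost-1.0.0+da6e74f7bb-py2.py3-none-manylinux1_x86_64.whl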