1.2.1 patch release (#6206)

* Hide C++ symbols from dmlc-core (#6188)
* Up version to 1.2.1
* Fix lint
* [CI] Fix Docker build for CUDA 11 (#6202)
* Update Dockerfile.gpu
This commit is contained in:
parent 0cd0dad0b5
commit bcb15a980f

CMakeLists.txt
@@ -1,9 +1,10 @@
 cmake_minimum_required(VERSION 3.13)
-project(xgboost LANGUAGES CXX C VERSION 1.2.0)
+project(xgboost LANGUAGES CXX C VERSION 1.2.1)
 include(cmake/Utils.cmake)
 list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
 cmake_policy(SET CMP0022 NEW)
 cmake_policy(SET CMP0079 NEW)
+set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
 cmake_policy(SET CMP0063 NEW)
 
 if ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
@@ -173,9 +174,6 @@ foreach(lib rabit rabit_base rabit_empty rabit_mock rabit_mock_static)
   # from dmlc is correctly applied to rabit.
   if (TARGET ${lib})
     target_link_libraries(${lib} dmlc ${CMAKE_THREAD_LIBS_INIT})
-    if (HIDE_CXX_SYMBOLS)  # Hide all C++ symbols from Rabit
-      set_target_properties(${lib} PROPERTIES CXX_VISIBILITY_PRESET hidden)
-    endif (HIDE_CXX_SYMBOLS)
     if (ENABLE_ALL_WARNINGS)
       target_compile_options(${lib} PRIVATE -Wall -Wextra)
     endif (ENABLE_ALL_WARNINGS)
@@ -204,8 +202,9 @@ endif (USE_NVTX)
 
 #-- Hide all C++ symbols
 if (HIDE_CXX_SYMBOLS)
-  set_target_properties(objxgboost PROPERTIES CXX_VISIBILITY_PRESET hidden)
-  set_target_properties(xgboost PROPERTIES CXX_VISIBILITY_PRESET hidden)
+  foreach(target objxgboost xgboost dmlc rabit rabit_mock_static)
+    set_target_properties(${target} PROPERTIES CXX_VISIBILITY_PRESET hidden)
+  endforeach()
 endif (HIDE_CXX_SYMBOLS)
 
 target_include_directories(xgboost
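
With HIDE_CXX_SYMBOLS now applied uniformly to objxgboost, xgboost, dmlc, rabit and rabit_mock_static, only the extern "C" API should remain visible in the shared library. A minimal Python sketch for sanity-checking that on a local build; the library path and the mangled name below are illustrative assumptions, not part of this commit:

# Illustrative check: the C API stays exported while C++ symbols are hidden.
# Assumes a local build produced lib/libxgboost.so with -DHIDE_CXX_SYMBOLS=ON.
import ctypes

lib = ctypes.CDLL("lib/libxgboost.so")

# extern "C" entry points such as XGBGetLastError remain visible.
lib.XGBGetLastError.restype = ctypes.c_char_p
print("C API exported, last error:", repr(lib.XGBGetLastError()))

# Mangled C++ symbols should no longer resolve; ctypes raises AttributeError.
try:
    lib["_ZN7xgboost7Example3FooEv"]  # hypothetical mangled name, for illustration only
    print("unexpected: a C++ symbol is still exported")
except AttributeError:
    print("C++ symbols are hidden")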

Jenkinsfile (vendored): 14 changes
@@ -144,7 +144,7 @@ def ClangTidy() {
   echo "Running clang-tidy job..."
   def container_type = "clang_tidy"
   def docker_binary = "docker"
-  def dockerArgs = "--build-arg CUDA_VERSION=10.1"
+  def dockerArgs = "--build-arg CUDA_VERSION_ARG=10.1"
   sh """
   ${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} python3 tests/ci_build/tidy.py
   """
@@ -261,7 +261,7 @@ def BuildCUDA(args) {
   echo "Build with CUDA ${args.cuda_version}"
   def container_type = GetCUDABuildContainerType(args.cuda_version)
   def docker_binary = "docker"
-  def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
+  def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}"
   def arch_flag = ""
   if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) {
     arch_flag = "-DGPU_COMPUTE_VER=75"
@@ -290,7 +290,7 @@ def BuildJVMPackagesWithCUDA(args) {
   echo "Build XGBoost4J-Spark with Spark ${args.spark_version}, CUDA ${args.cuda_version}"
   def container_type = "jvm_gpu_build"
   def docker_binary = "nvidia-docker"
-  def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
+  def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}"
   def arch_flag = ""
   if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) {
     arch_flag = "-DGPU_COMPUTE_VER=75"
@@ -365,7 +365,7 @@ def TestPythonGPU(args) {
   echo "Test Python GPU: CUDA ${args.host_cuda_version}"
   def container_type = "gpu"
   def docker_binary = "nvidia-docker"
-  def docker_args = "--build-arg CUDA_VERSION=${args.host_cuda_version}"
+  def docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}"
   if (args.multi_gpu) {
     echo "Using multiple GPUs"
     // Allocate extra space in /dev/shm to enable NCCL
@@ -406,7 +406,7 @@ def TestCppGPU(args) {
   echo "Test C++, CUDA ${args.host_cuda_version}"
   def container_type = "gpu"
   def docker_binary = "nvidia-docker"
-  def docker_args = "--build-arg CUDA_VERSION=${args.host_cuda_version}"
+  def docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}"
   sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost"
   deleteDir()
 }
@@ -424,7 +424,7 @@ def CrossTestJVMwithJDKGPU(args) {
   }
   def container_type = "gpu_jvm"
   def docker_binary = "nvidia-docker"
-  def docker_args = "--build-arg CUDA_VERSION=${args.host_cuda_version}"
+  def docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}"
   sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_jvm_gpu_cross.sh"
   deleteDir()
 }
@@ -476,7 +476,7 @@ def DeployJVMPackages(args) {
     ${dockerRun} jvm docker tests/ci_build/deploy_jvm_packages.sh ${args.spark_version} 0
     """
     sh """
-    ${dockerRun} jvm_gpu_build docker --build-arg CUDA_VERSION=10.0 tests/ci_build/deploy_jvm_packages.sh ${args.spark_version} 1
+    ${dockerRun} jvm_gpu_build docker --build-arg CUDA_VERSION_ARG=10.0 tests/ci_build/deploy_jvm_packages.sh ${args.spark_version} 1
     """
   }
   deleteDir()

@@ -1 +1 @@
-1.2.0
+1.2.1

@@ -40,7 +40,7 @@ class EarlyStopException(Exception):
     """
 
     def __init__(self, best_iteration):
-        super(EarlyStopException, self).__init__()
+        super().__init__()
         self.best_iteration = best_iteration
 
 
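The only change here is dropping the Python 2 compatible call style: under Python 3, zero-argument super() inside a method resolves to the same bound parent as the explicit super(EarlyStopException, self). A minimal standalone illustration; the class name is a stand-in, not xgboost code:

class EarlyStopDemo(Exception):
    # Stand-in mirroring EarlyStopException; names are illustrative only.
    def __init__(self, best_iteration):
        super().__init__()  # same effect as super(EarlyStopDemo, self).__init__()
        self.best_iteration = best_iteration

err = EarlyStopDemo(best_iteration=7)
assert err.best_iteration == 7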

@@ -1017,7 +1017,7 @@ class XGBRFClassifier(XGBClassifier):
                  **kwargs)
 
     def get_xgb_params(self):
-        params = super(XGBRFClassifier, self).get_xgb_params()
+        params = super().get_xgb_params()
         params['num_parallel_tree'] = self.n_estimators
         return params
 
@@ -1049,7 +1049,7 @@ class XGBRFRegressor(XGBRegressor):
                  reg_lambda=reg_lambda, **kwargs)
 
     def get_xgb_params(self):
-        params = super(XGBRFRegressor, self).get_xgb_params()
+        params = super().get_xgb_params()
        params['num_parallel_tree'] = self.n_estimators
         return params
 
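Both random forest wrappers get the same zero-argument super() cleanup; get_xgb_params() still asks the parent for its parameters and then pins num_parallel_tree to n_estimators, the parameter XGBoost uses to grow several trees per boosting round. A self-contained sketch of that override pattern, using stand-in classes rather than the real XGBClassifier hierarchy:

class BaseParamsStub:
    # Stand-in for the XGBModel side of the hierarchy; illustrative only.
    def __init__(self, n_estimators=100, learning_rate=1.0):
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate

    def get_xgb_params(self):
        return {"learning_rate": self.learning_rate}


class RandomForestStub(BaseParamsStub):
    def get_xgb_params(self):
        # Same shape as the patched XGBRFClassifier.get_xgb_params:
        # defer to the parent, then force one parallel tree per estimator.
        params = super().get_xgb_params()
        params["num_parallel_tree"] = self.n_estimators
        return params


rf = RandomForestStub(n_estimators=50)
assert rf.get_xgb_params()["num_parallel_tree"] == 50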

@@ -1,5 +1,6 @@
-ARG CUDA_VERSION
-FROM nvidia/cuda:$CUDA_VERSION-devel-ubuntu18.04
+ARG CUDA_VERSION_ARG
+FROM nvidia/cuda:$CUDA_VERSION_ARG-devel-ubuntu18.04
+ARG CUDA_VERSION_ARG
 
 # Environment
 ENV DEBIAN_FRONTEND noninteractive

@@ -1,5 +1,6 @@
-ARG CUDA_VERSION
-FROM nvidia/cuda:$CUDA_VERSION-runtime-ubuntu16.04
+ARG CUDA_VERSION_ARG
+FROM nvidia/cuda:$CUDA_VERSION_ARG-runtime-ubuntu16.04
+ARG CUDA_VERSION_ARG
 
 # Environment
 ENV DEBIAN_FRONTEND noninteractive
@@ -18,7 +19,7 @@ ENV PATH=/opt/python/bin:$PATH
 # Create new Conda environment with cuDF, Dask, and cuPy
 RUN \
     conda create -n gpu_test -c rapidsai-nightly -c rapidsai -c nvidia -c conda-forge -c defaults \
-        python=3.7 cudf=0.15* cudatoolkit=$CUDA_VERSION dask dask-cuda dask-cudf cupy \
+        python=3.7 cudf=0.15* cudatoolkit=$CUDA_VERSION_ARG dask dask-cuda dask-cudf cupy \
         numpy pytest scipy scikit-learn pandas matplotlib wheel python-kubernetes urllib3 graphviz hypothesis
 
 ENV GOSU_VERSION 1.10

@@ -1,6 +1,6 @@
-ARG CUDA_VERSION
-FROM nvidia/cuda:$CUDA_VERSION-devel-ubuntu16.04
-ARG CUDA_VERSION
+ARG CUDA_VERSION_ARG
+FROM nvidia/cuda:$CUDA_VERSION_ARG-devel-ubuntu16.04
+ARG CUDA_VERSION_ARG
 
 # Environment
 ENV DEBIAN_FRONTEND noninteractive
@@ -19,7 +19,7 @@ RUN \
 
 # NCCL2 (License: https://docs.nvidia.com/deeplearning/sdk/nccl-sla/index.html)
 RUN \
-    export CUDA_SHORT=`echo $CUDA_VERSION | egrep -o '[0-9]+\.[0-9]'` && \
+    export CUDA_SHORT=`echo $CUDA_VERSION_ARG | egrep -o '[0-9]+\.[0-9]'` && \
     export NCCL_VERSION=2.7.5-1 && \
     apt-get update && \
     apt-get install -y --allow-downgrades --allow-change-held-packages libnccl2=${NCCL_VERSION}+cuda${CUDA_SHORT} libnccl-dev=${NCCL_VERSION}+cuda${CUDA_SHORT}

@@ -1,6 +1,6 @@
-ARG CUDA_VERSION
-FROM nvidia/cuda:$CUDA_VERSION-devel-centos6
-ARG CUDA_VERSION
+ARG CUDA_VERSION_ARG
+FROM nvidia/cuda:$CUDA_VERSION_ARG-devel-centos6
+ARG CUDA_VERSION_ARG
 
 # Environment
 ENV DEBIAN_FRONTEND noninteractive
@@ -33,7 +33,7 @@ RUN \
 
 # NCCL2 (License: https://docs.nvidia.com/deeplearning/sdk/nccl-sla/index.html)
 RUN \
-    export CUDA_SHORT=`echo $CUDA_VERSION | egrep -o '[0-9]+\.[0-9]'` && \
+    export CUDA_SHORT=`echo $CUDA_VERSION_ARG | egrep -o '[0-9]+\.[0-9]'` && \
     export NCCL_VERSION=2.4.8-1 && \
     wget https://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm && \
     rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm && \

@@ -1,5 +1,6 @@
-ARG CUDA_VERSION
-FROM nvidia/cuda:$CUDA_VERSION-runtime-ubuntu16.04
+ARG CUDA_VERSION_ARG
+FROM nvidia/cuda:$CUDA_VERSION_ARG-runtime-ubuntu16.04
+ARG CUDA_VERSION_ARG
 ARG JDK_VERSION=8
 ARG SPARK_VERSION=3.0.0
 

@@ -1,6 +1,6 @@
-ARG CUDA_VERSION
-FROM nvidia/cuda:$CUDA_VERSION-devel-centos6
-ARG CUDA_VERSION
+ARG CUDA_VERSION_ARG
+FROM nvidia/cuda:$CUDA_VERSION_ARG-devel-centos6
+ARG CUDA_VERSION_ARG
 
 # Environment
 ENV DEBIAN_FRONTEND noninteractive
@@ -30,7 +30,7 @@ RUN \
 
 # NCCL2 (License: https://docs.nvidia.com/deeplearning/sdk/nccl-sla/index.html)
 RUN \
-    export CUDA_SHORT=`echo $CUDA_VERSION | egrep -o '[0-9]+\.[0-9]'` && \
+    export CUDA_SHORT=`echo $CUDA_VERSION_ARG | egrep -o '[0-9]+\.[0-9]'` && \
     export NCCL_VERSION=2.4.8-1 && \
     wget https://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm && \
    rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm && \