commit ea19555474
temp merge, disable 1 line, SetValid
.github/workflows/main.yml (vendored, 4 changed lines)
@@ -29,7 +29,7 @@ jobs:
 run: |
 mkdir build
 cd build
-cmake .. -DGOOGLE_TEST=ON -DUSE_OPENMP=ON -DUSE_DMLC_GTEST=ON -DPLUGIN_DENSE_PARSER=ON -GNinja
+cmake .. -DGOOGLE_TEST=ON -DUSE_OPENMP=ON -DUSE_DMLC_GTEST=ON -DPLUGIN_DENSE_PARSER=ON -GNinja -DBUILD_DEPRECATED_CLI=ON
 ninja -v
 - name: Run gtest binary
 run: |
@@ -56,7 +56,7 @@ jobs:
 run: |
 mkdir build
 cd build
-cmake .. -GNinja -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON -DUSE_OPENMP=OFF
+cmake .. -GNinja -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON -DUSE_OPENMP=OFF -DBUILD_DEPRECATED_CLI=ON
 ninja -v
 - name: Run gtest binary
 run: |
.github/workflows/python_tests.yml (vendored, 47 changed lines)
@@ -143,7 +143,7 @@ jobs:
 # Set prefix, to use OpenMP library from Conda env
 # See https://github.com/dmlc/xgboost/issues/7039#issuecomment-1025038228
 # to learn why we don't use libomp from Homebrew.
-cmake .. -GNinja -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
+cmake .. -GNinja -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DBUILD_DEPRECATED_CLI=ON
 ninja

 - name: Install Python package
@@ -190,7 +190,7 @@ jobs:
 run: |
 mkdir build_msvc
 cd build_msvc
-cmake .. -G"Visual Studio 17 2022" -DCMAKE_CONFIGURATION_TYPES="Release" -A x64 -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON
+cmake .. -G"Visual Studio 17 2022" -DCMAKE_CONFIGURATION_TYPES="Release" -A x64 -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON -DBUILD_DEPRECATED_CLI=ON
 cmake --build . --config Release --parallel $(nproc)

 - name: Install Python package
@@ -234,7 +234,7 @@ jobs:
 run: |
 mkdir build
 cd build
-cmake .. -GNinja -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
+cmake .. -GNinja -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DBUILD_DEPRECATED_CLI=ON
 ninja

 - name: Install Python package
@@ -255,3 +255,44 @@ jobs:
 shell: bash -l {0}
 run: |
 pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_spark
+
+python-system-installation-on-ubuntu:
+name: Test XGBoost Python package System Installation on ${{ matrix.os }}
+runs-on: ${{ matrix.os }}
+strategy:
+matrix:
+os: [ubuntu-latest]
+
+steps:
+- uses: actions/checkout@v2
+with:
+submodules: 'true'
+
+- name: Set up Python 3.8
+uses: actions/setup-python@v4
+with:
+python-version: 3.8
+
+- name: Install ninja
+run: |
+sudo apt-get update && sudo apt-get install -y ninja-build
+
+- name: Build XGBoost on Ubuntu
+run: |
+mkdir build
+cd build
+cmake .. -GNinja
+ninja
+
+- name: Copy lib to system lib
+run: |
+cp lib/* "$(python -c 'import sys; print(sys.base_prefix)')/lib"
+
+- name: Install XGBoost in Virtual Environment
+run: |
+cd python-package
+pip install virtualenv
+virtualenv venv
+source venv/bin/activate && \
+pip install -v . --config-settings use_system_libxgboost=True && \
+python -c 'import xgboost'
.github/workflows/r_tests.yml (vendored, 4 changed lines)
@@ -25,7 +25,7 @@ jobs:
 with:
 submodules: 'true'

-- uses: r-lib/actions/setup-r@50d1eae9b8da0bb3f8582c59a5b82225fa2fe7f2 # v2.3.1
+- uses: r-lib/actions/setup-r@11a22a908006c25fe054c4ef0ac0436b1de3edbe # v2.6.4
 with:
 r-version: ${{ matrix.config.r }}

@@ -64,7 +64,7 @@ jobs:
 with:
 submodules: 'true'

-- uses: r-lib/actions/setup-r@50d1eae9b8da0bb3f8582c59a5b82225fa2fe7f2 # v2.3.1
+- uses: r-lib/actions/setup-r@11a22a908006c25fe054c4ef0ac0436b1de3edbe # v2.6.4
 with:
 r-version: ${{ matrix.config.r }}

.github/workflows/scorecards.yml (vendored, 6 changed lines)
@@ -27,7 +27,7 @@ jobs:
 persist-credentials: false

 - name: "Run analysis"
-uses: ossf/scorecard-action@99c53751e09b9529366343771cc321ec74e9bd3d # tag=v2.0.6
+uses: ossf/scorecard-action@08b4669551908b1024bb425080c797723083c031 # tag=v2.2.0
 with:
 results_file: results.sarif
 results_format: sarif
@@ -41,7 +41,7 @@ jobs:
 # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
 # format to the repository Actions tab.
 - name: "Upload artifact"
-uses: actions/upload-artifact@6673cd052c4cd6fcf4b4e6e60ea986c889389535 # tag=v3.0.0
+uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # tag=v3.1.2
 with:
 name: SARIF file
 path: results.sarif
@@ -49,6 +49,6 @@ jobs:

 # Upload the results to GitHub's code scanning dashboard.
 - name: "Upload to code-scanning"
-uses: github/codeql-action/upload-sarif@5f532563584d71fdef14ee64d17bafb34f751ce5 # tag=v1.0.26
+uses: github/codeql-action/upload-sarif@7b6664fa89524ee6e3c3e9749402d5afd69b3cd8 # tag=v2.14.1
 with:
 sarif_file: results.sarif
.gitignore (vendored, 3 changed lines)
@@ -48,6 +48,7 @@ Debug
 *.Rproj
 ./xgboost.mpi
 ./xgboost.mock
+*.bak
 #.Rbuildignore
 R-package.Rproj
 *.cache*
@@ -145,6 +146,8 @@ __MACOSX/
 model*.json

 # R tests
+*.htm
+*.html
 *.libsvm
 *.rds
 Rplots.pdf
@@ -32,4 +32,3 @@ formats:
 python:
 install:
 - requirements: doc/requirements.txt
-system_packages: true
CITATION (1 changed line)
@@ -15,4 +15,3 @@
 address = {New York, NY, USA},
 keywords = {large-scale machine learning},
 }
-
CMakeLists.txt (108 changed lines)
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
-project(xgboost LANGUAGES CXX C VERSION 2.0.0)
+project(xgboost LANGUAGES CXX C VERSION 2.1.0)
 include(cmake/Utils.cmake)
 list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
 cmake_policy(SET CMP0022 NEW)
@@ -14,8 +14,24 @@ endif ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUA

 message(STATUS "CMake version ${CMAKE_VERSION}")

-if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
-message(FATAL_ERROR "GCC version must be at least 5.0!")
+# Check compiler versions
+# Use recent compilers to ensure that std::filesystem is available
+if(MSVC)
+if(MSVC_VERSION LESS 1920)
+message(FATAL_ERROR "Need Visual Studio 2019 or newer to build XGBoost")
+endif()
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "8.1")
+message(FATAL_ERROR "Need GCC 8.1 or newer to build XGBoost")
+endif()
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
+if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "11.0")
+message(FATAL_ERROR "Need Xcode 11.0 (AppleClang 11.0) or newer to build XGBoost")
+endif()
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.0")
+message(FATAL_ERROR "Need Clang 9.0 or newer to build XGBoost")
+endif()
 endif()

 include(${xgboost_SOURCE_DIR}/cmake/FindPrefetchIntrinsics.cmake)
@@ -29,8 +45,8 @@ set_default_configuration_release()
 option(BUILD_C_DOC "Build documentation for C APIs using Doxygen." OFF)
 option(USE_OPENMP "Build with OpenMP support." ON)
 option(BUILD_STATIC_LIB "Build static library" OFF)
+option(BUILD_DEPRECATED_CLI "Build the deprecated command line interface" OFF)
 option(FORCE_SHARED_CRT "Build with dynamic CRT on Windows (/MD)" OFF)
-option(RABIT_BUILD_MPI "Build MPI" OFF)
 ## Bindings
 option(JVM_BINDINGS "Build JVM bindings" OFF)
 option(R_LIB "Build shared library for R package" OFF)
@@ -50,6 +66,7 @@ option(HIDE_CXX_SYMBOLS "Build shared library and hide all C++ symbols" OFF)
 option(KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR "Output build artifacts in CMake binary dir" OFF)
 ## CUDA
 option(USE_CUDA "Build with GPU acceleration" OFF)
+option(USE_PER_THREAD_DEFAULT_STREAM "Build with per-thread default stream" ON)
 option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
 option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF)
 set(GPU_COMPUTE_VER "" CACHE STRING
@@ -58,10 +75,6 @@ set(GPU_COMPUTE_VER "" CACHE STRING
 option(USE_HIP "Build with GPU acceleration" OFF)
 option(USE_RCCL "Build with RCCL to enable distributed GPU support." OFF)
 option(BUILD_WITH_SHARED_RCCL "Build with shared RCCL library." OFF)
-## Copied From dmlc
-option(USE_HDFS "Build with HDFS support" OFF)
-option(USE_AZURE "Build with AZURE support" OFF)
-option(USE_S3 "Build with S3 support" OFF)
 ## Sanitizers
 option(USE_SANITIZER "Use santizer flags" OFF)
 option(SANITIZER_PATH "Path to sanitizes.")
@@ -108,13 +121,6 @@ if (R_LIB AND GOOGLE_TEST)
 message(WARNING "Some C++ unittests will fail with `R_LIB` enabled,
 as R package redirects some functions to R runtime implementation.")
 endif (R_LIB AND GOOGLE_TEST)
-if (USE_AVX)
-message(SEND_ERROR "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from XGBoost.")
-endif (USE_AVX)
-if (PLUGIN_LZ4)
-message(SEND_ERROR "The option 'PLUGIN_LZ4' is removed from XGBoost.")
-endif (PLUGIN_LZ4)
-
 if (PLUGIN_RMM AND NOT (USE_CUDA))
 message(SEND_ERROR "`PLUGIN_RMM` must be enabled with `USE_CUDA` flag.")
 endif (PLUGIN_RMM AND NOT (USE_CUDA))
@@ -152,6 +158,26 @@ if (PLUGIN_FEDERATED)
 endif ()
 endif ()

+#-- Removed options
+if (USE_AVX)
+message(SEND_ERROR "The option `USE_AVX` is deprecated as experimental AVX features have been removed from XGBoost.")
+endif (USE_AVX)
+if (PLUGIN_LZ4)
+message(SEND_ERROR "The option `PLUGIN_LZ4` is removed from XGBoost.")
+endif (PLUGIN_LZ4)
+if (RABIT_BUILD_MPI)
+message(SEND_ERROR "The option `RABIT_BUILD_MPI` has been removed from XGBoost.")
+endif (RABIT_BUILD_MPI)
+if (USE_S3)
+message(SEND_ERROR "The option `USE_S3` has been removed from XGBoost")
+endif (USE_S3)
+if (USE_AZURE)
+message(SEND_ERROR "The option `USE_AZURE` has been removed from XGBoost")
+endif (USE_AZURE)
+if (USE_HDFS)
+message(SEND_ERROR "The option `USE_HDFS` has been removed from XGBoost")
+endif (USE_HDFS)
+
 #-- Sanitizer
 if (USE_SANITIZER)
 include(cmake/Sanitizer.cmake)
@@ -251,9 +277,6 @@ endif (MSVC)

 # rabit
 add_subdirectory(rabit)
-if (RABIT_BUILD_MPI)
-find_package(MPI REQUIRED)
-endif (RABIT_BUILD_MPI)

 # core xgboost
 add_subdirectory(${xgboost_SOURCE_DIR}/src)
@@ -274,6 +297,15 @@ add_subdirectory(${xgboost_SOURCE_DIR}/plugin)

 if (PLUGIN_RMM)
 find_package(rmm REQUIRED)
+
+# Patch the rmm targets so they reference the static cudart
+# Remove this patch once RMM stops specifying cudart requirement
+# (since RMM is a header-only library, it should not specify cudart in its CMake config)
+get_target_property(rmm_link_libs rmm::rmm INTERFACE_LINK_LIBRARIES)
+list(REMOVE_ITEM rmm_link_libs CUDA::cudart)
+list(APPEND rmm_link_libs CUDA::cudart_static)
+set_target_properties(rmm::rmm PROPERTIES INTERFACE_LINK_LIBRARIES "${rmm_link_libs}")
+get_target_property(rmm_link_libs rmm::rmm INTERFACE_LINK_LIBRARIES)
 endif (PLUGIN_RMM)

 #-- library
@@ -290,19 +322,30 @@ target_include_directories(xgboost
 #-- End shared library

 #-- CLI for xgboost
-add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc)
-target_link_libraries(runxgboost PRIVATE objxgboost)
-target_include_directories(runxgboost
+if (BUILD_DEPRECATED_CLI)
+add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc)
+target_link_libraries(runxgboost PRIVATE objxgboost)
+target_include_directories(runxgboost
 PRIVATE
 ${xgboost_SOURCE_DIR}/include
 ${xgboost_SOURCE_DIR}/dmlc-core/include
 ${xgboost_SOURCE_DIR}/rabit/include
 )
 set_target_properties(runxgboost PROPERTIES OUTPUT_NAME xgboost)
+xgboost_target_properties(runxgboost)
+xgboost_target_link_libraries(runxgboost)
+xgboost_target_defs(runxgboost)
+
+if (KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR)
+set_output_directory(runxgboost ${xgboost_BINARY_DIR})
+else ()
+set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
+endif (KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR)
+endif (BUILD_DEPRECATED_CLI)
 #-- End CLI for xgboost

 # Common setup for all targets
-foreach(target xgboost objxgboost dmlc runxgboost)
+foreach(target xgboost objxgboost dmlc)
 xgboost_target_properties(${target})
 xgboost_target_link_libraries(${target})
 xgboost_target_defs(${target})
@@ -315,14 +358,15 @@ if (JVM_BINDINGS)
 endif (JVM_BINDINGS)

 if (KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR)
-set_output_directory(runxgboost ${xgboost_BINARY_DIR})
 set_output_directory(xgboost ${xgboost_BINARY_DIR}/lib)
 else ()
-set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
 set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib)
 endif ()

 # Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
-add_dependencies(xgboost runxgboost)
+if (BUILD_DEPRECATED_CLI)
+add_dependencies(xgboost runxgboost)
+endif (BUILD_DEPRECATED_CLI)
+
 #-- Installing XGBoost
 if (R_LIB)
@@ -358,9 +402,17 @@ install(DIRECTORY ${xgboost_SOURCE_DIR}/include/xgboost
 #
 # https://github.com/dmlc/xgboost/issues/6085
 if (BUILD_STATIC_LIB)
+if (BUILD_DEPRECATED_CLI)
 set(INSTALL_TARGETS xgboost runxgboost objxgboost dmlc)
+else()
+set(INSTALL_TARGETS xgboost objxgboost dmlc)
+endif (BUILD_DEPRECATED_CLI)
 else (BUILD_STATIC_LIB)
+if (BUILD_DEPRECATED_CLI)
 set(INSTALL_TARGETS xgboost runxgboost)
+else(BUILD_DEPRECATED_CLI)
+set(INSTALL_TARGETS xgboost)
+endif (BUILD_DEPRECATED_CLI)
 endif (BUILD_STATIC_LIB)

 install(TARGETS ${INSTALL_TARGETS}
@@ -410,6 +462,7 @@ if (GOOGLE_TEST)
 ${xgboost_SOURCE_DIR}/tests/cli/machine.conf.in
 ${xgboost_BINARY_DIR}/tests/cli/machine.conf
 @ONLY)
+if (BUILD_DEPRECATED_CLI)
 add_test(
 NAME TestXGBoostCLI
 COMMAND runxgboost ${xgboost_BINARY_DIR}/tests/cli/machine.conf
@@ -417,6 +470,7 @@ if (GOOGLE_TEST)
 set_tests_properties(TestXGBoostCLI
 PROPERTIES
 PASS_REGULAR_EXPRESSION ".*test-rmse:0.087.*")
+endif (BUILD_DEPRECATED_CLI)
 endif (GOOGLE_TEST)

 # For MSVC: Call msvc_use_static_runtime() once again to completely
NEWS.md (17 changed lines)
@@ -3,6 +3,23 @@ XGBoost Change Log

 This file records the changes in xgboost library in reverse chronological order.

+## 1.7.6 (2023 Jun 16)
+
+This is a patch release for bug fixes. The CRAN package for the R binding is kept at 1.7.5.
+
+### Bug Fixes
+* Fix distributed training with mixed dense and sparse partitions. (#9272)
+* Fix monotone constraints on CPU with large trees. (#9122)
+* [spark] Make the spark model have the same UID as its estimator (#9022)
+* Optimize prediction with `QuantileDMatrix`. (#9096)
+
+### Document
+* Improve doxygen (#8959)
+* Update the cuDF pip index URL. (#9106)
+
+### Maintenance
+* Fix tests with pandas 2.0. (#9014)
+
 ## 1.7.5 (2023 Mar 30)
 This is a patch release for bug fixes.

@@ -1,41 +1,54 @@
 find_package(LibR REQUIRED)
 message(STATUS "LIBR_CORE_LIBRARY " ${LIBR_CORE_LIBRARY})

-file(GLOB_RECURSE R_SOURCES
+file(
+GLOB_RECURSE R_SOURCES
 ${CMAKE_CURRENT_LIST_DIR}/src/*.cc
-${CMAKE_CURRENT_LIST_DIR}/src/*.c)
+${CMAKE_CURRENT_LIST_DIR}/src/*.c
+)

 # Use object library to expose symbols
 add_library(xgboost-r OBJECT ${R_SOURCES})
-if (ENABLE_ALL_WARNINGS)
+if(ENABLE_ALL_WARNINGS)
 target_compile_options(xgboost-r PRIVATE -Wall -Wextra)
-endif (ENABLE_ALL_WARNINGS)
-target_compile_definitions(xgboost-r
-PUBLIC
+endif()
+target_compile_definitions(
+xgboost-r PUBLIC
 -DXGBOOST_STRICT_R_MODE=1
 -DXGBOOST_CUSTOMIZE_GLOBAL_PRNG=1
 -DDMLC_LOG_BEFORE_THROW=0
 -DDMLC_DISABLE_STDIN=1
 -DDMLC_LOG_CUSTOMIZE=1
--DRABIT_STRICT_CXX98_)
-target_include_directories(xgboost-r
-PRIVATE
+-DRABIT_STRICT_CXX98_
+)
+target_include_directories(
+xgboost-r PRIVATE
 ${LIBR_INCLUDE_DIRS}
 ${PROJECT_SOURCE_DIR}/include
 ${PROJECT_SOURCE_DIR}/dmlc-core/include
-${PROJECT_SOURCE_DIR}/rabit/include)
+${PROJECT_SOURCE_DIR}/rabit/include
+)

 target_link_libraries(xgboost-r PUBLIC ${LIBR_CORE_LIBRARY})
-if (USE_OPENMP)
+if(USE_OPENMP)
 find_package(OpenMP REQUIRED)
 target_link_libraries(xgboost-r PUBLIC OpenMP::OpenMP_CXX OpenMP::OpenMP_C)
-endif (USE_OPENMP)
+endif()

 set_target_properties(
 xgboost-r PROPERTIES
 CXX_STANDARD 17
 CXX_STANDARD_REQUIRED ON
-POSITION_INDEPENDENT_CODE ON)
+POSITION_INDEPENDENT_CODE ON
+)

 # Get compilation and link flags of xgboost-r and propagate to objxgboost
 target_link_libraries(objxgboost PUBLIC xgboost-r)

 # Add all objects of xgboost-r to objxgboost
 target_sources(objxgboost INTERFACE $<TARGET_OBJECTS:xgboost-r>)
@@ -1,8 +1,8 @@
 Package: xgboost
 Type: Package
 Title: Extreme Gradient Boosting
-Version: 2.0.0.1
+Version: 2.1.0.0
-Date: 2022-10-18
+Date: 2023-08-19
 Authors@R: c(
 person("Tianqi", "Chen", role = c("aut"),
 email = "tianqi.tchen@gmail.com"),
@@ -511,7 +511,7 @@ cb.cv.predict <- function(save_models = FALSE) {
 if (save_models) {
 env$basket$models <- lapply(env$bst_folds, function(fd) {
 xgb.attr(fd$bst, 'niter') <- env$end_iteration - 1
-xgb.Booster.complete(xgb.handleToBooster(fd$bst), saveraw = TRUE)
+xgb.Booster.complete(xgb.handleToBooster(handle = fd$bst, raw = NULL), saveraw = TRUE)
 })
 }
 }
@@ -659,7 +659,7 @@ cb.gblinear.history <- function(sparse = FALSE) {
 } else { # xgb.cv:
 cf <- vector("list", length(env$bst_folds))
 for (i in seq_along(env$bst_folds)) {
-dmp <- xgb.dump(xgb.handleToBooster(env$bst_folds[[i]]$bst))
+dmp <- xgb.dump(xgb.handleToBooster(handle = env$bst_folds[[i]]$bst, raw = NULL))
 cf[[i]] <- as.numeric(grep('(booster|bias|weigh)', dmp, invert = TRUE, value = TRUE))
 if (sparse) cf[[i]] <- as(cf[[i]], "sparseVector")
 }
@@ -140,7 +140,7 @@ check.custom.eval <- function(env = parent.frame()) {


 # Update a booster handle for an iteration with dtrain data
-xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
+xgb.iter.update <- function(booster_handle, dtrain, iter, obj) {
 if (!identical(class(booster_handle), "xgb.Booster.handle")) {
 stop("booster_handle must be of xgb.Booster.handle class")
 }
@@ -151,10 +151,33 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
 if (is.null(obj)) {
 .Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
 } else {
-pred <- predict(booster_handle, dtrain, outputmargin = TRUE, training = TRUE,
-ntreelimit = 0)
+pred <- predict(
+booster_handle,
+dtrain,
+outputmargin = TRUE,
+training = TRUE,
+reshape = TRUE
+)
 gpair <- obj(pred, dtrain)
-.Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
+n_samples <- dim(dtrain)[1]
+
+msg <- paste(
+"Since 2.1.0, the shape of the gradient and hessian is required to be ",
+"(n_samples, n_targets) or (n_samples, n_classes).",
+sep = ""
+)
+if (is.matrix(gpair$grad) && dim(gpair$grad)[1] != n_samples) {
+warning(msg)
+}
+if (is.numeric(gpair$grad) && length(gpair$grad) != n_samples) {
+warning(msg)
+}
+
+gpair$grad <- matrix(gpair$grad, nrow = n_samples)
+gpair$hess <- matrix(gpair$hess, nrow = n_samples)
+.Call(
+XGBoosterBoostOneIter_R, booster_handle, dtrain, iter, gpair$grad, gpair$hess
+)
 }
 return(TRUE)
 }
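Note: a minimal sketch (not part of this commit; names and data are illustrative) of a custom objective that satisfies the shape requirement introduced above. Since 2.1.0 the gradient and hessian passed on to XGBoosterBoostOneIter_R are reshaped to one row per sample, so a callback can return them as (n_samples, n_targets) matrices directly:

    # Hypothetical squared-error objective; grad/hess are returned with one row
    # per sample (single target here, so one column), matching the shape check
    # performed in xgb.iter.update() above.
    library(xgboost)
    squared_error_obj <- function(preds, dtrain) {
      labels <- getinfo(dtrain, "label")
      grad <- preds - labels           # first-order gradient, one value per sample
      hess <- rep(1, length(labels))   # constant second-order term for squared error
      list(grad = matrix(grad, ncol = 1), hess = matrix(hess, ncol = 1))
    }
    dtrain <- xgb.DMatrix(as.matrix(mtcars[, -1]), label = mtcars$mpg)
    bst <- xgb.train(params = list(max_depth = 2), data = dtrain,
                     nrounds = 5, obj = squared_error_obj)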
@@ -163,7 +186,7 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
 # Evaluate one iteration.
 # Returns a named vector of evaluation metrics
 # with the names in a 'datasetname-metricname' format.
-xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
+xgb.iter.eval <- function(booster_handle, watchlist, iter, feval) {
 if (!identical(class(booster_handle), "xgb.Booster.handle"))
 stop("class of booster_handle must be xgb.Booster.handle")

@@ -234,7 +257,7 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
 y <- factor(y)
 }
 }
-folds <- xgb.createFolds(y, nfold)
+folds <- xgb.createFolds(y = y, k = nfold)
 } else {
 # make simple non-stratified folds
 kstep <- length(rnd_idx) %/% nfold
@@ -251,7 +274,7 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
 # Creates CV folds stratified by the values of y.
 # It was borrowed from caret::createFolds and simplified
 # by always returning an unnamed list of fold indices.
-xgb.createFolds <- function(y, k = 10) {
+xgb.createFolds <- function(y, k) {
 if (is.numeric(y)) {
 ## Group the numeric data based on their magnitudes
 ## and sample within those groups.
@@ -1,7 +1,6 @@
 # Construct an internal xgboost Booster and return a handle to it.
 # internal utility function
-xgb.Booster.handle <- function(params = list(), cachelist = list(),
-modelfile = NULL, handle = NULL) {
+xgb.Booster.handle <- function(params, cachelist, modelfile, handle) {
 if (typeof(cachelist) != "list" ||
 !all(vapply(cachelist, inherits, logical(1), what = 'xgb.DMatrix'))) {
 stop("cachelist must be a list of xgb.DMatrix objects")
@@ -12,7 +11,7 @@ xgb.Booster.handle <- function(params = list(), cachelist = list(),
 ## A filename
 handle <- .Call(XGBoosterCreate_R, cachelist)
 modelfile <- path.expand(modelfile)
-.Call(XGBoosterLoadModel_R, handle, modelfile[1])
+.Call(XGBoosterLoadModel_R, handle, enc2utf8(modelfile[1]))
 class(handle) <- "xgb.Booster.handle"
 if (length(params) > 0) {
 xgb.parameters(handle) <- params
@@ -44,7 +43,7 @@ xgb.Booster.handle <- function(params = list(), cachelist = list(),

 # Convert xgb.Booster.handle to xgb.Booster
 # internal utility function
-xgb.handleToBooster <- function(handle, raw = NULL) {
+xgb.handleToBooster <- function(handle, raw) {
 bst <- list(handle = handle, raw = raw)
 class(bst) <- "xgb.Booster"
 return(bst)
@@ -129,7 +128,12 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
 stop("argument type must be xgb.Booster")

 if (is.null.handle(object$handle)) {
-object$handle <- xgb.Booster.handle(modelfile = object$raw, handle = object$handle)
+object$handle <- xgb.Booster.handle(
+params = list(),
+cachelist = list(),
+modelfile = object$raw,
+handle = object$handle
+)
 } else {
 if (is.null(object$raw) && saveraw) {
 object$raw <- xgb.serialize(object$handle)
@@ -475,7 +479,7 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
 #' @export
 predict.xgb.Booster.handle <- function(object, ...) {

-bst <- xgb.handleToBooster(object)
+bst <- xgb.handleToBooster(handle = object, raw = NULL)

 ret <- predict(bst, ...)
 return(ret)
@@ -88,7 +88,7 @@ xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, nthre

 # get dmatrix from data, label
 # internal helper method
-xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL, nthread = NULL) {
+xgb.get.DMatrix <- function(data, label, missing, weight, nthread) {
 if (inherits(data, "dgCMatrix") || is.matrix(data)) {
 if (is.null(label)) {
 stop("label must be provided when data is a matrix")
@@ -135,9 +135,6 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
 check.custom.obj()
 check.custom.eval()

-#if (is.null(params[['eval_metric']]) && is.null(feval))
-#  stop("Either 'eval_metric' or 'feval' must be provided for CV")
-
 # Check the labels
 if ((inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
 (!inherits(data, 'xgb.DMatrix') && is.null(label))) {
@@ -161,10 +158,6 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
 folds <- generate.cv.folds(nfold, nrow(data), stratified, cv_label, params)
 }

-# Potential TODO: sequential CV
-#if (strategy == 'sequential')
-#  stop('Sequential CV strategy is not yet implemented')
-
 # verbosity & evaluation printing callback:
 params <- c(params, list(silent = 1))
 print_every_n <- max(as.integer(print_every_n), 1L)
@@ -194,7 +187,13 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing

 # create the booster-folds
 # train_folds
-dall <- xgb.get.DMatrix(data, label, missing, nthread = params$nthread)
+dall <- xgb.get.DMatrix(
+data = data,
+label = label,
+missing = missing,
+weight = NULL,
+nthread = params$nthread
+)
 bst_folds <- lapply(seq_along(folds), function(k) {
 dtest <- slice(dall, folds[[k]])
 # code originally contributed by @RolandASc on stackoverflow
@@ -202,7 +201,12 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
 dtrain <- slice(dall, unlist(folds[-k]))
 else
 dtrain <- slice(dall, train_folds[[k]])
-handle <- xgb.Booster.handle(params, list(dtrain, dtest))
+handle <- xgb.Booster.handle(
+params = params,
+cachelist = list(dtrain, dtest),
+modelfile = NULL,
+handle = NULL
+)
 list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test = dtest), index = folds[[k]])
 })
 rm(dall)
@@ -223,8 +227,18 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
 for (f in cb$pre_iter) f()

 msg <- lapply(bst_folds, function(fd) {
-xgb.iter.update(fd$bst, fd$dtrain, iteration - 1, obj)
-xgb.iter.eval(fd$bst, fd$watchlist, iteration - 1, feval)
+xgb.iter.update(
+booster_handle = fd$bst,
+dtrain = fd$dtrain,
+iter = iteration - 1,
+obj = obj
+)
+xgb.iter.eval(
+booster_handle = fd$bst,
+watchlist = fd$watchlist,
+iter = iteration - 1,
+feval = feval
+)
 })
 msg <- simplify2array(msg)
 bst_evaluation <- rowMeans(msg)
@@ -142,6 +142,7 @@ xgb.ggplot.shap.summary <- function(data, shap_contrib = NULL, features = NULL,
 #'
 #' @return A data.table containing the observation ID, the feature name, the
 #' feature value (normalized if specified), and the SHAP contribution value.
+#' @noRd
 prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
 data <- data_list[["data"]]
 shap_contrib <- data_list[["shap_contrib"]]
@@ -170,6 +171,7 @@ prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
 #' @param x Numeric vector
 #'
 #' @return Numeric vector with mean 0 and sd 1.
+#' @noRd
 normalize <- function(x) {
 loc <- mean(x, na.rm = TRUE)
 scale <- stats::sd(x, na.rm = TRUE)
@@ -181,7 +183,7 @@ normalize <- function(x) {
 # ... the plots
 # cols number of columns
 # internal utility function
-multiplot <- function(..., cols = 1) {
+multiplot <- function(..., cols) {
 plots <- list(...)
 num_plots <- length(plots)

@@ -35,7 +35,12 @@ xgb.load <- function(modelfile) {
 if (is.null(modelfile))
 stop("xgb.load: modelfile cannot be NULL")

-handle <- xgb.Booster.handle(modelfile = modelfile)
+handle <- xgb.Booster.handle(
+params = list(),
+cachelist = list(),
+modelfile = modelfile,
+handle = NULL
+)
 # re-use modelfile if it is raw so we do not need to serialize
 if (typeof(modelfile) == "raw") {
 warning(
@@ -45,9 +50,9 @@ xgb.load <- function(modelfile) {
 " `xgb.unserialize` instead. "
 )
 )
-bst <- xgb.handleToBooster(handle, modelfile)
+bst <- xgb.handleToBooster(handle = handle, raw = modelfile)
 } else {
-bst <- xgb.handleToBooster(handle, NULL)
+bst <- xgb.handleToBooster(handle = handle, raw = NULL)
 }
 bst <- xgb.Booster.complete(bst, saveraw = TRUE)
 return(bst)
@@ -86,8 +86,7 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
 text <- xgb.dump(model = model, with_stats = TRUE)
 }

-if (length(text) < 2 ||
-sum(grepl('leaf=(\\d+)', text)) < 1) {
+if (length(text) < 2 || !any(grepl('leaf=(\\d+)', text))) {
 stop("Non-tree model detected! This function can only be used with tree models.")
 }

@@ -136,7 +136,7 @@ get.leaf.depth <- function(dt_tree) {
 # list of paths to each leaf in a tree
 paths <- lapply(paths_tmp$vpath, names)
 # combine into a resulting path lengths table for a tree
-data.table(Depth = sapply(paths, length), ID = To[Leaf == TRUE])
+data.table(Depth = lengths(paths), ID = To[Leaf == TRUE])
 }, by = Tree]
 }

@@ -193,7 +193,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
 #' hence allows us to see which features have a negative / positive contribution
 #' on the model prediction, and whether the contribution is different for larger
 #' or smaller values of the feature. We effectively try to replicate the
-#' \code{summary_plot} function from https://github.com/slundberg/shap.
+#' \code{summary_plot} function from https://github.com/shap/shap.
 #'
 #' @inheritParams xgb.plot.shap
 #'
@@ -202,7 +202,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
 #'
 #' @examples # See \code{\link{xgb.plot.shap}}.
 #' @seealso \code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}},
-#' \url{https://github.com/slundberg/shap}
+#' \url{https://github.com/shap/shap}
 xgb.plot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
 trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
 # Only ggplot implementation is available.
@@ -43,6 +43,6 @@ xgb.save <- function(model, fname) {
 }
 model <- xgb.Booster.complete(model, saveraw = FALSE)
 fname <- path.expand(fname)
-.Call(XGBoosterSaveModel_R, model$handle, fname[1])
+.Call(XGBoosterSaveModel_R, model$handle, enc2utf8(fname[1]))
 return(TRUE)
 }
@@ -363,8 +363,13 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
 is_update <- NVL(params[['process_type']], '.') == 'update'

 # Construct a booster (either a new one or load from xgb_model)
-handle <- xgb.Booster.handle(params, append(watchlist, dtrain), xgb_model)
-bst <- xgb.handleToBooster(handle)
+handle <- xgb.Booster.handle(
+params = params,
+cachelist = append(watchlist, dtrain),
+modelfile = xgb_model,
+handle = NULL
+)
+bst <- xgb.handleToBooster(handle = handle, raw = NULL)

 # extract parameters that can affect the relationship b/w #trees and #iterations
 num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1)
@@ -390,10 +395,21 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),

 for (f in cb$pre_iter) f()

-xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)
+xgb.iter.update(
+booster_handle = bst$handle,
+dtrain = dtrain,
+iter = iteration - 1,
+obj = obj
+)

-if (length(watchlist) > 0)
-bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval) # nolint: object_usage_linter
+if (length(watchlist) > 0) {
+bst_evaluation <- xgb.iter.eval( # nolint: object_usage_linter
+booster_handle = bst$handle,
+watchlist = watchlist,
+iter = iteration - 1,
+feval = feval
+)
+}

 xgb.attr(bst$handle, 'niter') <- iteration - 1

@@ -10,7 +10,13 @@ xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
 save_period = NULL, save_name = "xgboost.model",
 xgb_model = NULL, callbacks = list(), ...) {
 merged <- check.booster.params(params, ...)
-dtrain <- xgb.get.DMatrix(data, label, missing, weight, nthread = merged$nthread)
+dtrain <- xgb.get.DMatrix(
+data = data,
+label = label,
+missing = missing,
+weight = weight,
+nthread = merged$nthread
+)

 watchlist <- list(train = dtrain)

R-package/configure (vendored, 18 changed lines)
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.71 for xgboost 2.0.0.
+# Generated by GNU Autoconf 2.71 for xgboost 2.1.0.
 #
 #
 # Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation,
@@ -607,8 +607,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='xgboost'
 PACKAGE_TARNAME='xgboost'
-PACKAGE_VERSION='2.0.0'
+PACKAGE_VERSION='2.1.0'
-PACKAGE_STRING='xgboost 2.0.0'
+PACKAGE_STRING='xgboost 2.1.0'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''

@@ -1225,7 +1225,7 @@ if test "$ac_init_help" = "long"; then
 # Omit some internal or obsolete options to make the list less imposing.
 # This message is too long to be a string in the A/UX 3.1 sh.
 cat <<_ACEOF
-\`configure' configures xgboost 2.0.0 to adapt to many kinds of systems.
+\`configure' configures xgboost 2.1.0 to adapt to many kinds of systems.

 Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1287,7 +1287,7 @@ fi

 if test -n "$ac_init_help"; then
 case $ac_init_help in
-short | recursive ) echo "Configuration of xgboost 2.0.0:";;
+short | recursive ) echo "Configuration of xgboost 2.1.0:";;
 esac
 cat <<\_ACEOF

@@ -1367,7 +1367,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
 cat <<\_ACEOF
-xgboost configure 2.0.0
+xgboost configure 2.1.0
 generated by GNU Autoconf 2.71

 Copyright (C) 2021 Free Software Foundation, Inc.
@@ -1533,7 +1533,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.

-It was created by xgboost $as_me 2.0.0, which was
+It was created by xgboost $as_me 2.1.0, which was
 generated by GNU Autoconf 2.71. Invocation command line was

 $ $0$ac_configure_args_raw
@@ -3412,7 +3412,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by xgboost $as_me 2.0.0, which was
+This file was extended by xgboost $as_me 2.1.0, which was
 generated by GNU Autoconf 2.71. Invocation command line was

 CONFIG_FILES = $CONFIG_FILES
@@ -3467,7 +3467,7 @@ ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config='$ac_cs_config_escaped'
 ac_cs_version="\\
-xgboost config.status 2.0.0
+xgboost config.status 2.1.0
 configured by $0, generated by GNU Autoconf 2.71,
 with options \\"\$ac_cs_config\\"

@@ -2,7 +2,7 @@

 AC_PREREQ(2.69)

-AC_INIT([xgboost],[2.0.0],[],[xgboost],[])
+AC_INIT([xgboost],[2.1.0],[],[xgboost],[])

 : ${R_HOME=`R RHOME`}
 if test -z "${R_HOME}"; then
@@ -44,7 +44,7 @@ treeInteractions <- function(input_tree, input_max_depth) {

 # Remove non-interactions (same variable)
 interaction_list <- lapply(interaction_list, unique) # remove same variables
-interaction_length <- sapply(interaction_list, length)
+interaction_length <- lengths(interaction_list)
 interaction_list <- interaction_list[interaction_length > 1]
 interaction_list <- unique(lapply(interaction_list, sort))
 return(interaction_list)
@@ -1,18 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.ggplot.R
-\name{normalize}
-\alias{normalize}
-\title{Scale feature value to have mean 0, standard deviation 1}
-\usage{
-normalize(x)
-}
-\arguments{
-\item{x}{Numeric vector}
-}
-\value{
-Numeric vector with mean 0 and sd 1.
-}
-\description{
-This is used to compare multiple features on the same plot.
-Internal utility function
-}
@@ -1,27 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.ggplot.R
-\name{prepare.ggplot.shap.data}
-\alias{prepare.ggplot.shap.data}
-\title{Combine and melt feature values and SHAP contributions for sample
-observations.}
-\usage{
-prepare.ggplot.shap.data(data_list, normalize = FALSE)
-}
-\arguments{
-\item{data_list}{List containing 'data' and 'shap_contrib' returned by
-\code{xgb.shap.data()}.}
-
-\item{normalize}{Whether to standardize feature values to have mean 0 and
-standard deviation 1 (useful for comparing multiple features on the same
-plot). Default \code{FALSE}.}
-}
-\value{
-A data.table containing the observation ID, the feature name, the
-feature value (normalized if specified), and the SHAP contribution value.
-}
-\description{
-Conforms to data format required for ggplot functions.
-}
-\details{
-Internal utility function.
-}
@@ -67,12 +67,12 @@ Each point (observation) is coloured based on its feature value. The plot
 hence allows us to see which features have a negative / positive contribution
 on the model prediction, and whether the contribution is different for larger
 or smaller values of the feature. We effectively try to replicate the
-\code{summary_plot} function from https://github.com/slundberg/shap.
+\code{summary_plot} function from https://github.com/shap/shap.
 }
 \examples{
 # See \code{\link{xgb.plot.shap}}.
 }
 \seealso{
 \code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}},
-\url{https://github.com/slundberg/shap}
+\url{https://github.com/shap/shap}
 }
@ -5,8 +5,11 @@ ENABLE_STD_THREAD=1
|
|||||||
|
|
||||||
CXX_STD = CXX17
|
CXX_STD = CXX17
|
||||||
|
|
||||||
XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
|
XGB_RFLAGS = \
|
||||||
-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\
|
-DXGBOOST_STRICT_R_MODE=1 \
|
||||||
|
-DDMLC_LOG_BEFORE_THROW=0 \
|
||||||
|
-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) \
|
||||||
|
-DDMLC_DISABLE_STDIN=1 \
|
||||||
-DDMLC_LOG_CUSTOMIZE=1
|
-DDMLC_LOG_CUSTOMIZE=1
|
||||||
|
|
||||||
# disable the use of thread_local for 32 bit windows:
|
# disable the use of thread_local for 32 bit windows:
|
||||||
@ -15,9 +18,25 @@ ifeq ($(R_OSTYPE)$(WIN),windows)
|
|||||||
endif
|
endif
|
||||||
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
|
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
|
||||||
|
|
||||||
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
|
PKG_CPPFLAGS = \
|
||||||
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ @ENDIAN_FLAG@ -pthread $(CXX_VISIBILITY)
|
-I$(PKGROOT)/include \
|
||||||
PKG_LIBS = @OPENMP_CXXFLAGS@ @OPENMP_LIB@ @ENDIAN_FLAG@ @BACKTRACE_LIB@ -pthread
|
-I$(PKGROOT)/dmlc-core/include \
|
||||||
|
-I$(PKGROOT)/rabit/include \
|
||||||
|
-I$(PKGROOT) \
|
||||||
|
$(XGB_RFLAGS)
|
||||||
|
|
||||||
|
PKG_CXXFLAGS = \
|
||||||
|
@OPENMP_CXXFLAGS@ \
|
||||||
|
@ENDIAN_FLAG@ \
|
||||||
|
-pthread \
|
||||||
|
$(CXX_VISIBILITY)
|
||||||
|
|
||||||
|
PKG_LIBS = \
|
||||||
|
@OPENMP_CXXFLAGS@ \
|
||||||
|
@OPENMP_LIB@ \
|
||||||
|
@ENDIAN_FLAG@ \
|
||||||
|
@BACKTRACE_LIB@ \
|
||||||
|
-pthread
|
||||||
|
|
||||||
OBJECTS= \
|
OBJECTS= \
|
||||||
./xgboost_R.o \
|
./xgboost_R.o \
|
||||||
@ -47,6 +66,7 @@ OBJECTS= \
|
|||||||
$(PKGROOT)/src/data/data.o \
|
$(PKGROOT)/src/data/data.o \
|
||||||
$(PKGROOT)/src/data/sparse_page_raw_format.o \
|
$(PKGROOT)/src/data/sparse_page_raw_format.o \
|
||||||
$(PKGROOT)/src/data/ellpack_page.o \
|
$(PKGROOT)/src/data/ellpack_page.o \
|
||||||
|
$(PKGROOT)/src/data/file_iterator.o \
|
||||||
$(PKGROOT)/src/data/gradient_index.o \
|
$(PKGROOT)/src/data/gradient_index.o \
|
||||||
$(PKGROOT)/src/data/gradient_index_page_source.o \
|
$(PKGROOT)/src/data/gradient_index_page_source.o \
|
||||||
$(PKGROOT)/src/data/gradient_index_format.o \
|
$(PKGROOT)/src/data/gradient_index_format.o \
|
||||||
@ -68,6 +88,8 @@ OBJECTS= \
|
|||||||
$(PKGROOT)/src/tree/updater_quantile_hist.o \
|
$(PKGROOT)/src/tree/updater_quantile_hist.o \
|
||||||
$(PKGROOT)/src/tree/updater_refresh.o \
|
$(PKGROOT)/src/tree/updater_refresh.o \
|
||||||
$(PKGROOT)/src/tree/updater_sync.o \
|
$(PKGROOT)/src/tree/updater_sync.o \
|
||||||
|
$(PKGROOT)/src/tree/hist/param.o \
|
||||||
|
$(PKGROOT)/src/tree/hist/histogram.o \
|
||||||
$(PKGROOT)/src/linear/linear_updater.o \
|
$(PKGROOT)/src/linear/linear_updater.o \
|
||||||
$(PKGROOT)/src/linear/updater_coordinate.o \
|
$(PKGROOT)/src/linear/updater_coordinate.o \
|
||||||
$(PKGROOT)/src/linear/updater_shotgun.o \
|
$(PKGROOT)/src/linear/updater_shotgun.o \
|
||||||
@ -82,6 +104,7 @@ OBJECTS= \
|
|||||||
$(PKGROOT)/src/common/charconv.o \
|
$(PKGROOT)/src/common/charconv.o \
|
||||||
$(PKGROOT)/src/common/column_matrix.o \
|
$(PKGROOT)/src/common/column_matrix.o \
|
||||||
$(PKGROOT)/src/common/common.o \
|
$(PKGROOT)/src/common/common.o \
|
||||||
|
$(PKGROOT)/src/common/error_msg.o \
|
||||||
$(PKGROOT)/src/common/hist_util.o \
|
$(PKGROOT)/src/common/hist_util.o \
|
||||||
$(PKGROOT)/src/common/host_device_vector.o \
|
$(PKGROOT)/src/common/host_device_vector.o \
|
||||||
$(PKGROOT)/src/common/io.o \
|
$(PKGROOT)/src/common/io.o \
|
||||||
|
|||||||
@ -5,8 +5,11 @@ ENABLE_STD_THREAD=0
|
|||||||
|
|
||||||
CXX_STD = CXX17
|
CXX_STD = CXX17
|
||||||
|
|
||||||
XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
|
XGB_RFLAGS = \
|
||||||
-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\
|
-DXGBOOST_STRICT_R_MODE=1 \
|
||||||
|
-DDMLC_LOG_BEFORE_THROW=0 \
|
||||||
|
-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) \
|
||||||
|
-DDMLC_DISABLE_STDIN=1 \
|
||||||
-DDMLC_LOG_CUSTOMIZE=1
|
-DDMLC_LOG_CUSTOMIZE=1
|
||||||
|
|
||||||
# disable the use of thread_local for 32 bit windows:
|
# disable the use of thread_local for 32 bit windows:
|
||||||
@ -15,9 +18,25 @@ ifeq ($(R_OSTYPE)$(WIN),windows)
|
|||||||
endif
|
endif
|
||||||
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
|
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
|
||||||
|
|
||||||
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
|
PKG_CPPFLAGS = \
|
||||||
PKG_CXXFLAGS= $(SHLIB_OPENMP_CXXFLAGS) -DDMLC_CMAKE_LITTLE_ENDIAN=1 $(SHLIB_PTHREAD_FLAGS) $(CXX_VISIBILITY)
|
-I$(PKGROOT)/include \
|
||||||
PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) -DDMLC_CMAKE_LITTLE_ENDIAN=1 $(SHLIB_PTHREAD_FLAGS) -lwsock32 -lws2_32
|
-I$(PKGROOT)/dmlc-core/include \
|
||||||
|
-I$(PKGROOT)/rabit/include \
|
||||||
|
-I$(PKGROOT) \
|
||||||
|
$(XGB_RFLAGS)
|
||||||
|
|
||||||
|
PKG_CXXFLAGS = \
|
||||||
|
$(SHLIB_OPENMP_CXXFLAGS) \
|
||||||
|
-DDMLC_CMAKE_LITTLE_ENDIAN=1 \
|
||||||
|
$(SHLIB_PTHREAD_FLAGS) \
|
||||||
|
$(CXX_VISIBILITY)
|
||||||
|
|
||||||
|
PKG_LIBS = \
|
||||||
|
$(SHLIB_OPENMP_CXXFLAGS) \
|
||||||
|
-DDMLC_CMAKE_LITTLE_ENDIAN=1 \
|
||||||
|
$(SHLIB_PTHREAD_FLAGS) \
|
||||||
|
-lwsock32 \
|
||||||
|
-lws2_32
|
||||||
|
|
||||||
OBJECTS= \
|
OBJECTS= \
|
||||||
./xgboost_R.o \
|
./xgboost_R.o \
|
||||||
@ -47,6 +66,7 @@ OBJECTS= \
|
|||||||
$(PKGROOT)/src/data/data.o \
|
$(PKGROOT)/src/data/data.o \
|
||||||
$(PKGROOT)/src/data/sparse_page_raw_format.o \
|
$(PKGROOT)/src/data/sparse_page_raw_format.o \
|
||||||
$(PKGROOT)/src/data/ellpack_page.o \
|
$(PKGROOT)/src/data/ellpack_page.o \
|
||||||
|
$(PKGROOT)/src/data/file_iterator.o \
|
||||||
$(PKGROOT)/src/data/gradient_index.o \
|
$(PKGROOT)/src/data/gradient_index.o \
|
||||||
$(PKGROOT)/src/data/gradient_index_page_source.o \
|
$(PKGROOT)/src/data/gradient_index_page_source.o \
|
||||||
$(PKGROOT)/src/data/gradient_index_format.o \
|
$(PKGROOT)/src/data/gradient_index_format.o \
|
||||||
@ -68,6 +88,8 @@ OBJECTS= \
|
|||||||
$(PKGROOT)/src/tree/updater_quantile_hist.o \
|
$(PKGROOT)/src/tree/updater_quantile_hist.o \
|
||||||
$(PKGROOT)/src/tree/updater_refresh.o \
|
$(PKGROOT)/src/tree/updater_refresh.o \
|
||||||
$(PKGROOT)/src/tree/updater_sync.o \
|
$(PKGROOT)/src/tree/updater_sync.o \
|
||||||
|
$(PKGROOT)/src/tree/hist/param.o \
|
||||||
|
$(PKGROOT)/src/tree/hist/histogram.o \
|
||||||
$(PKGROOT)/src/linear/linear_updater.o \
|
$(PKGROOT)/src/linear/linear_updater.o \
|
||||||
$(PKGROOT)/src/linear/updater_coordinate.o \
|
$(PKGROOT)/src/linear/updater_coordinate.o \
|
||||||
$(PKGROOT)/src/linear/updater_shotgun.o \
|
$(PKGROOT)/src/linear/updater_shotgun.o \
|
||||||
@ -82,6 +104,7 @@ OBJECTS= \
|
|||||||
$(PKGROOT)/src/common/charconv.o \
|
$(PKGROOT)/src/common/charconv.o \
|
||||||
$(PKGROOT)/src/common/column_matrix.o \
|
$(PKGROOT)/src/common/column_matrix.o \
|
||||||
$(PKGROOT)/src/common/common.o \
|
$(PKGROOT)/src/common/common.o \
|
||||||
|
$(PKGROOT)/src/common/error_msg.o \
|
||||||
$(PKGROOT)/src/common/hist_util.o \
|
$(PKGROOT)/src/common/hist_util.o \
|
||||||
$(PKGROOT)/src/common/host_device_vector.o \
|
$(PKGROOT)/src/common/host_device_vector.o \
|
||||||
$(PKGROOT)/src/common/io.o \
|
$(PKGROOT)/src/common/io.o \
|
||||||
|
|||||||
@ -16,7 +16,7 @@ Check these declarations against the C/Fortran source code.
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
/* .Call calls */
|
/* .Call calls */
|
||||||
extern SEXP XGBoosterBoostOneIter_R(SEXP, SEXP, SEXP, SEXP);
|
extern SEXP XGBoosterTrainOneIter_R(SEXP, SEXP, SEXP, SEXP, SEXP);
|
||||||
extern SEXP XGBoosterCreate_R(SEXP);
|
extern SEXP XGBoosterCreate_R(SEXP);
|
||||||
extern SEXP XGBoosterCreateInEmptyObj_R(SEXP, SEXP);
|
extern SEXP XGBoosterCreateInEmptyObj_R(SEXP, SEXP);
|
||||||
extern SEXP XGBoosterDumpModel_R(SEXP, SEXP, SEXP, SEXP);
|
extern SEXP XGBoosterDumpModel_R(SEXP, SEXP, SEXP, SEXP);
|
||||||
@ -53,7 +53,7 @@ extern SEXP XGBGetGlobalConfig_R(void);
|
|||||||
extern SEXP XGBoosterFeatureScore_R(SEXP, SEXP);
|
extern SEXP XGBoosterFeatureScore_R(SEXP, SEXP);
|
||||||
|
|
||||||
static const R_CallMethodDef CallEntries[] = {
|
static const R_CallMethodDef CallEntries[] = {
|
||||||
{"XGBoosterBoostOneIter_R", (DL_FUNC) &XGBoosterBoostOneIter_R, 4},
|
{"XGBoosterBoostOneIter_R", (DL_FUNC) &XGBoosterTrainOneIter_R, 5},
|
||||||
{"XGBoosterCreate_R", (DL_FUNC) &XGBoosterCreate_R, 1},
|
{"XGBoosterCreate_R", (DL_FUNC) &XGBoosterCreate_R, 1},
|
||||||
{"XGBoosterCreateInEmptyObj_R", (DL_FUNC) &XGBoosterCreateInEmptyObj_R, 2},
|
{"XGBoosterCreateInEmptyObj_R", (DL_FUNC) &XGBoosterCreateInEmptyObj_R, 2},
|
||||||
{"XGBoosterDumpModel_R", (DL_FUNC) &XGBoosterDumpModel_R, 4},
|
{"XGBoosterDumpModel_R", (DL_FUNC) &XGBoosterDumpModel_R, 4},
|
||||||
|
|||||||
@ -48,13 +48,6 @@
|
|||||||
|
|
||||||
using dmlc::BeginPtr;
|
using dmlc::BeginPtr;
|
||||||
|
|
||||||
xgboost::Context const *BoosterCtx(BoosterHandle handle) {
|
|
||||||
CHECK_HANDLE();
|
|
||||||
auto *learner = static_cast<xgboost::Learner *>(handle);
|
|
||||||
CHECK(learner);
|
|
||||||
return learner->Ctx();
|
|
||||||
}
|
|
||||||
|
|
||||||
xgboost::Context const *DMatrixCtx(DMatrixHandle handle) {
|
xgboost::Context const *DMatrixCtx(DMatrixHandle handle) {
|
||||||
CHECK_HANDLE();
|
CHECK_HANDLE();
|
||||||
auto p_m = static_cast<std::shared_ptr<xgboost::DMatrix> *>(handle);
|
auto p_m = static_cast<std::shared_ptr<xgboost::DMatrix> *>(handle);
|
||||||
@ -120,11 +113,25 @@ XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat, SEXP missing, SEXP n_threads) {
|
|||||||
ctx.nthread = asInteger(n_threads);
|
ctx.nthread = asInteger(n_threads);
|
||||||
std::int32_t threads = ctx.Threads();
|
std::int32_t threads = ctx.Threads();
|
||||||
|
|
||||||
|
if (is_int) {
|
||||||
xgboost::common::ParallelFor(nrow, threads, [&](xgboost::omp_ulong i) {
|
xgboost::common::ParallelFor(nrow, threads, [&](xgboost::omp_ulong i) {
|
||||||
for (size_t j = 0; j < ncol; ++j) {
|
for (size_t j = 0; j < ncol; ++j) {
|
||||||
data[i * ncol + j] = is_int ? static_cast<float>(iin[i + nrow * j]) : din[i + nrow * j];
|
auto v = iin[i + nrow * j];
|
||||||
|
if (v == NA_INTEGER) {
|
||||||
|
data[i * ncol + j] = std::numeric_limits<float>::quiet_NaN();
|
||||||
|
} else {
|
||||||
|
data[i * ncol + j] = static_cast<float>(v);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
} else {
|
||||||
|
xgboost::common::ParallelFor(nrow, threads, [&](xgboost::omp_ulong i) {
|
||||||
|
for (size_t j = 0; j < ncol; ++j) {
|
||||||
|
data[i * ncol + j] = din[i + nrow * j];
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
DMatrixHandle handle;
|
DMatrixHandle handle;
|
||||||
CHECK_CALL(XGDMatrixCreateFromMat_omp(BeginPtr(data), nrow, ncol,
|
CHECK_CALL(XGDMatrixCreateFromMat_omp(BeginPtr(data), nrow, ncol,
|
||||||
asReal(missing), &handle, threads));
|
asReal(missing), &handle, threads));
|
||||||
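The new integer branch above maps R's `NA_INTEGER` to a quiet NaN instead of casting the sentinel value straight to `float`, so missing entries in integer matrices are treated as missing rather than as a large negative number. A hedged R-side sketch of the behaviour this enables (the test added later in this diff checks the same thing by comparing serialised DMatrix files):

```r
library(xgboost)

# Integer matrix with one missing value
x_int <- matrix(1:6, nrow = 3, ncol = 2)
x_int[1, 1] <- NA

# The same data as a numeric matrix
x_num <- matrix(as.numeric(x_int), nrow = 3, ncol = 2)

# With this change both constructions should yield equivalent DMatrix objects,
# with the NA entry counted as missing in each case.
dm_int <- xgb.DMatrix(x_int)
dm_num <- xgb.DMatrix(x_num)
```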
@ -394,21 +401,25 @@ XGB_DLL SEXP XGBoosterUpdateOneIter_R(SEXP handle, SEXP iter, SEXP dtrain) {
|
|||||||
return R_NilValue;
|
return R_NilValue;
|
||||||
}
|
}
|
||||||
|
|
||||||
XGB_DLL SEXP XGBoosterBoostOneIter_R(SEXP handle, SEXP dtrain, SEXP grad, SEXP hess) {
|
XGB_DLL SEXP XGBoosterTrainOneIter_R(SEXP handle, SEXP dtrain, SEXP iter, SEXP grad, SEXP hess) {
|
||||||
R_API_BEGIN();
|
R_API_BEGIN();
|
||||||
CHECK_EQ(length(grad), length(hess))
|
CHECK_EQ(length(grad), length(hess)) << "gradient and hess must have same length.";
|
||||||
<< "gradient and hess must have same length";
|
SEXP gdim = getAttrib(grad, R_DimSymbol);
|
||||||
int len = length(grad);
|
auto n_samples = static_cast<std::size_t>(INTEGER(gdim)[0]);
|
||||||
std::vector<float> tgrad(len), thess(len);
|
auto n_targets = static_cast<std::size_t>(INTEGER(gdim)[1]);
|
||||||
auto ctx = BoosterCtx(R_ExternalPtrAddr(handle));
|
|
||||||
xgboost::common::ParallelFor(len, ctx->Threads(), [&](xgboost::omp_ulong j) {
|
SEXP hdim = getAttrib(hess, R_DimSymbol);
|
||||||
tgrad[j] = REAL(grad)[j];
|
CHECK_EQ(INTEGER(hdim)[0], n_samples) << "mismatched size between gradient and hessian";
|
||||||
thess[j] = REAL(hess)[j];
|
CHECK_EQ(INTEGER(hdim)[1], n_targets) << "mismatched size between gradient and hessian";
|
||||||
});
|
double const *d_grad = REAL(grad);
|
||||||
CHECK_CALL(XGBoosterBoostOneIter(R_ExternalPtrAddr(handle),
|
double const *d_hess = REAL(hess);
|
||||||
R_ExternalPtrAddr(dtrain),
|
|
||||||
BeginPtr(tgrad), BeginPtr(thess),
|
auto ctx = xgboost::detail::BoosterCtx(R_ExternalPtrAddr(handle));
|
||||||
len));
|
auto [s_grad, s_hess] = xgboost::detail::MakeGradientInterface(
|
||||||
|
ctx, d_grad, d_hess, xgboost::linalg::kF, n_samples, n_targets);
|
||||||
|
CHECK_CALL(XGBoosterTrainOneIter(R_ExternalPtrAddr(handle), R_ExternalPtrAddr(dtrain),
|
||||||
|
asInteger(iter), s_grad.c_str(), s_hess.c_str()));
|
||||||
|
|
||||||
R_API_END();
|
R_API_END();
|
||||||
return R_NilValue;
|
return R_NilValue;
|
||||||
}
|
}
|
||||||
@ -424,7 +435,7 @@ XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evn
|
|||||||
std::vector<const char*> vec_sptr;
|
std::vector<const char*> vec_sptr;
|
||||||
for (int i = 0; i < len; ++i) {
|
for (int i = 0; i < len; ++i) {
|
||||||
vec_dmats.push_back(R_ExternalPtrAddr(VECTOR_ELT(dmats, i)));
|
vec_dmats.push_back(R_ExternalPtrAddr(VECTOR_ELT(dmats, i)));
|
||||||
vec_names.push_back(std::string(CHAR(asChar(VECTOR_ELT(evnames, i)))));
|
vec_names.emplace_back(CHAR(asChar(VECTOR_ELT(evnames, i))));
|
||||||
}
|
}
|
||||||
for (int i = 0; i < len; ++i) {
|
for (int i = 0; i < len; ++i) {
|
||||||
vec_sptr.push_back(vec_names[i].c_str());
|
vec_sptr.push_back(vec_names[i].c_str());
|
||||||
@ -460,7 +471,7 @@ XGB_DLL SEXP XGBoosterPredictFromDMatrix_R(SEXP handle, SEXP dmat, SEXP json_con
|
|||||||
len *= out_shape[i];
|
len *= out_shape[i];
|
||||||
}
|
}
|
||||||
r_out_result = PROTECT(allocVector(REALSXP, len));
|
r_out_result = PROTECT(allocVector(REALSXP, len));
|
||||||
auto ctx = BoosterCtx(R_ExternalPtrAddr(handle));
|
auto ctx = xgboost::detail::BoosterCtx(R_ExternalPtrAddr(handle));
|
||||||
xgboost::common::ParallelFor(len, ctx->Threads(), [&](xgboost::omp_ulong i) {
|
xgboost::common::ParallelFor(len, ctx->Threads(), [&](xgboost::omp_ulong i) {
|
||||||
REAL(r_out_result)[i] = out_result[i];
|
REAL(r_out_result)[i] = out_result[i];
|
||||||
});
|
});
|
||||||
@ -669,7 +680,7 @@ XGB_DLL SEXP XGBoosterFeatureScore_R(SEXP handle, SEXP json_config) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
out_scores_sexp = PROTECT(allocVector(REALSXP, len));
|
out_scores_sexp = PROTECT(allocVector(REALSXP, len));
|
||||||
auto ctx = BoosterCtx(R_ExternalPtrAddr(handle));
|
auto ctx = xgboost::detail::BoosterCtx(R_ExternalPtrAddr(handle));
|
||||||
xgboost::common::ParallelFor(len, ctx->Threads(), [&](xgboost::omp_ulong i) {
|
xgboost::common::ParallelFor(len, ctx->Threads(), [&](xgboost::omp_ulong i) {
|
||||||
REAL(out_scores_sexp)[i] = out_scores[i];
|
REAL(out_scores_sexp)[i] = out_scores[i];
|
||||||
});
|
});
|
||||||
|
|||||||
@ -161,12 +161,13 @@ XGB_DLL SEXP XGBoosterUpdateOneIter_R(SEXP ext, SEXP iter, SEXP dtrain);
|
|||||||
* \brief update the model, by directly specify gradient and second order gradient,
|
* \brief update the model, by directly specify gradient and second order gradient,
|
||||||
* this can be used to replace UpdateOneIter, to support customized loss function
|
* this can be used to replace UpdateOneIter, to support customized loss function
|
||||||
* \param handle handle
|
* \param handle handle
|
||||||
|
* \param iter The current training iteration.
|
||||||
* \param dtrain training data
|
* \param dtrain training data
|
||||||
* \param grad gradient statistics
|
* \param grad gradient statistics
|
||||||
* \param hess second order gradient statistics
|
* \param hess second order gradient statistics
|
||||||
* \return R_NilValue
|
* \return R_NilValue
|
||||||
*/
|
*/
|
||||||
XGB_DLL SEXP XGBoosterBoostOneIter_R(SEXP handle, SEXP dtrain, SEXP grad, SEXP hess);
|
XGB_DLL SEXP XGBoosterTrainOneIter_R(SEXP handle, SEXP dtrain, SEXP iter, SEXP grad, SEXP hess);
|
||||||
|
|
||||||
/*!
|
/*!
|
||||||
* \brief get evaluation statistics for xgboost
|
* \brief get evaluation statistics for xgboost
|
||||||
|
|||||||
@ -85,9 +85,18 @@ test_that("dart prediction works", {
|
|||||||
rnorm(100)
|
rnorm(100)
|
||||||
|
|
||||||
set.seed(1994)
|
set.seed(1994)
|
||||||
booster_by_xgboost <- xgboost(data = d, label = y, max_depth = 2, booster = "dart",
|
booster_by_xgboost <- xgboost(
|
||||||
rate_drop = 0.5, one_drop = TRUE,
|
data = d,
|
||||||
eta = 1, nthread = 2, nrounds = nrounds, objective = "reg:squarederror")
|
label = y,
|
||||||
|
max_depth = 2,
|
||||||
|
booster = "dart",
|
||||||
|
rate_drop = 0.5,
|
||||||
|
one_drop = TRUE,
|
||||||
|
eta = 1,
|
||||||
|
nthread = 2,
|
||||||
|
nrounds = nrounds,
|
||||||
|
objective = "reg:squarederror"
|
||||||
|
)
|
||||||
pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0)
|
pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0)
|
||||||
pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds)
|
pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds)
|
||||||
expect_true(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_1, byrow = TRUE)))
|
expect_true(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_1, byrow = TRUE)))
|
||||||
@ -97,14 +106,14 @@ test_that("dart prediction works", {
|
|||||||
|
|
||||||
set.seed(1994)
|
set.seed(1994)
|
||||||
dtrain <- xgb.DMatrix(data = d, info = list(label = y))
|
dtrain <- xgb.DMatrix(data = d, info = list(label = y))
|
||||||
booster_by_train <- xgb.train(params = list(
|
booster_by_train <- xgb.train(
|
||||||
|
params = list(
|
||||||
booster = "dart",
|
booster = "dart",
|
||||||
max_depth = 2,
|
max_depth = 2,
|
||||||
eta = 1,
|
eta = 1,
|
||||||
rate_drop = 0.5,
|
rate_drop = 0.5,
|
||||||
one_drop = TRUE,
|
one_drop = TRUE,
|
||||||
nthread = 1,
|
nthread = 1,
|
||||||
tree_method = "exact",
|
|
||||||
objective = "reg:squarederror"
|
objective = "reg:squarederror"
|
||||||
),
|
),
|
||||||
data = dtrain,
|
data = dtrain,
|
||||||
@ -399,7 +408,7 @@ test_that("colsample_bytree works", {
|
|||||||
xgb.importance(model = bst)
|
xgb.importance(model = bst)
|
||||||
# If colsample_bytree works properly, a variety of features should be used
|
# If colsample_bytree works properly, a variety of features should be used
|
||||||
# in the 100 trees
|
# in the 100 trees
|
||||||
expect_gte(nrow(xgb.importance(model = bst)), 30)
|
expect_gte(nrow(xgb.importance(model = bst)), 28)
|
||||||
})
|
})
|
||||||
|
|
||||||
test_that("Configuration works", {
|
test_that("Configuration works", {
|
||||||
|
|||||||
@ -64,23 +64,80 @@ test_that("custom objective using DMatrix attr works", {
|
|||||||
expect_equal(class(bst), "xgb.Booster")
|
expect_equal(class(bst), "xgb.Booster")
|
||||||
})
|
})
|
||||||
|
|
||||||
test_that("custom objective with multi-class works", {
|
test_that("custom objective with multi-class shape", {
|
||||||
data <- as.matrix(iris[, -5])
|
data <- as.matrix(iris[, -5])
|
||||||
label <- as.numeric(iris$Species) - 1
|
label <- as.numeric(iris$Species) - 1
|
||||||
dtrain <- xgb.DMatrix(data = data, label = label)
|
dtrain <- xgb.DMatrix(data = data, label = label)
|
||||||
nclasses <- 3
|
n_classes <- 3
|
||||||
|
|
||||||
fake_softprob <- function(preds, dtrain) {
|
fake_softprob <- function(preds, dtrain) {
|
||||||
expect_true(all(matrix(preds) == 0.5))
|
expect_true(all(matrix(preds) == 0.5))
|
||||||
grad <- rnorm(dim(as.matrix(preds))[1])
|
## use numeric vector here to test compatibility with XGBoost < 2.1
|
||||||
expect_equal(dim(data)[1] * nclasses, dim(as.matrix(preds))[1])
|
grad <- rnorm(length(as.matrix(preds)))
|
||||||
hess <- rnorm(dim(as.matrix(preds))[1])
|
expect_equal(dim(data)[1] * n_classes, dim(as.matrix(preds))[1] * n_classes)
|
||||||
return (list(grad = grad, hess = hess))
|
hess <- rnorm(length(as.matrix(preds)))
|
||||||
|
return(list(grad = grad, hess = hess))
|
||||||
}
|
}
|
||||||
fake_merror <- function(preds, dtrain) {
|
fake_merror <- function(preds, dtrain) {
|
||||||
expect_equal(dim(data)[1] * nclasses, dim(as.matrix(preds))[1])
|
expect_equal(dim(data)[1] * n_classes, dim(as.matrix(preds))[1])
|
||||||
}
|
}
|
||||||
param$objective <- fake_softprob
|
param$objective <- fake_softprob
|
||||||
param$eval_metric <- fake_merror
|
param$eval_metric <- fake_merror
|
||||||
bst <- xgb.train(param, dtrain, 1, num_class = nclasses)
|
bst <- xgb.train(param, dtrain, 1, num_class = n_classes)
|
||||||
|
})
|
||||||
|
|
||||||
|
softmax <- function(values) {
|
||||||
|
values <- as.numeric(values)
|
||||||
|
exps <- exp(values)
|
||||||
|
den <- sum(exps)
|
||||||
|
return(exps / den)
|
||||||
|
}
|
||||||
|
|
||||||
|
softprob <- function(predt, dtrain) {
|
||||||
|
y <- getinfo(dtrain, "label")
|
||||||
|
|
||||||
|
n_samples <- dim(predt)[1]
|
||||||
|
n_classes <- dim(predt)[2]
|
||||||
|
|
||||||
|
grad <- matrix(nrow = n_samples, ncol = n_classes)
|
||||||
|
hess <- matrix(nrow = n_samples, ncol = n_classes)
|
||||||
|
|
||||||
|
for (i in seq_len(n_samples)) {
|
||||||
|
t <- y[i]
|
||||||
|
p <- softmax(predt[i, ])
|
||||||
|
for (c in seq_len(n_classes)) {
|
||||||
|
g <- if (c - 1 == t) {
|
||||||
|
p[c] - 1.0
|
||||||
|
} else {
|
||||||
|
p[c]
|
||||||
|
}
|
||||||
|
h <- max((2.0 * p[c] * (1.0 - p[c])), 1e-6)
|
||||||
|
grad[i, c] <- g
|
||||||
|
hess[i, c] <- h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return(list(grad = grad, hess = hess))
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
test_that("custom objective with multi-class works", {
|
||||||
|
data <- as.matrix(iris[, -5])
|
||||||
|
label <- as.numeric(iris$Species) - 1
|
||||||
|
|
||||||
|
dtrain <- xgb.DMatrix(data = data, label = label)
|
||||||
|
|
||||||
|
param$num_class <- 3
|
||||||
|
param$objective <- softprob
|
||||||
|
param$eval_metric <- "merror"
|
||||||
|
param$base_score <- 0.5
|
||||||
|
|
||||||
|
custom_bst <- xgb.train(param, dtrain, 2)
|
||||||
|
custom_predt <- predict(custom_bst, dtrain)
|
||||||
|
|
||||||
|
param$objective <- "multi:softmax"
|
||||||
|
builtin_bst <- xgb.train(param, dtrain, 2)
|
||||||
|
builtin_predt <- predict(builtin_bst, dtrain)
|
||||||
|
|
||||||
|
expect_equal(custom_predt, builtin_predt)
|
||||||
})
|
})
|
||||||
|
|||||||
@ -56,6 +56,42 @@ test_that("xgb.DMatrix: basic construction", {
|
|||||||
expect_equal(raw_fd, raw_dgc)
|
expect_equal(raw_fd, raw_dgc)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
test_that("xgb.DMatrix: NA", {
|
||||||
|
n_samples <- 3
|
||||||
|
x <- cbind(
|
||||||
|
x1 = sample(x = 4, size = n_samples, replace = TRUE),
|
||||||
|
x2 = sample(x = 4, size = n_samples, replace = TRUE)
|
||||||
|
)
|
||||||
|
x[1, "x1"] <- NA
|
||||||
|
|
||||||
|
m <- xgb.DMatrix(x)
|
||||||
|
xgb.DMatrix.save(m, "int.dmatrix")
|
||||||
|
|
||||||
|
x <- matrix(as.numeric(x), nrow = n_samples, ncol = 2)
|
||||||
|
colnames(x) <- c("x1", "x2")
|
||||||
|
m <- xgb.DMatrix(x)
|
||||||
|
|
||||||
|
xgb.DMatrix.save(m, "float.dmatrix")
|
||||||
|
|
||||||
|
iconn <- file("int.dmatrix", "rb")
|
||||||
|
fconn <- file("float.dmatrix", "rb")
|
||||||
|
|
||||||
|
expect_equal(file.size("int.dmatrix"), file.size("float.dmatrix"))
|
||||||
|
|
||||||
|
bytes <- file.size("int.dmatrix")
|
||||||
|
idmatrix <- readBin(iconn, "raw", n = bytes)
|
||||||
|
fdmatrix <- readBin(fconn, "raw", n = bytes)
|
||||||
|
|
||||||
|
expect_equal(length(idmatrix), length(fdmatrix))
|
||||||
|
expect_equal(idmatrix, fdmatrix)
|
||||||
|
|
||||||
|
close(iconn)
|
||||||
|
close(fconn)
|
||||||
|
|
||||||
|
file.remove("int.dmatrix")
|
||||||
|
file.remove("float.dmatrix")
|
||||||
|
})
|
||||||
|
|
||||||
test_that("xgb.DMatrix: saving, loading", {
|
test_that("xgb.DMatrix: saving, loading", {
|
||||||
# save to a local file
|
# save to a local file
|
||||||
dtest1 <- xgb.DMatrix(test_data, label = test_label)
|
dtest1 <- xgb.DMatrix(test_data, label = test_label)
|
||||||
@ -72,6 +108,7 @@ test_that("xgb.DMatrix: saving, loading", {
|
|||||||
tmp <- c("0 1:1 2:1", "1 3:1", "0 1:1")
|
tmp <- c("0 1:1 2:1", "1 3:1", "0 1:1")
|
||||||
tmp_file <- tempfile(fileext = ".libsvm")
|
tmp_file <- tempfile(fileext = ".libsvm")
|
||||||
writeLines(tmp, tmp_file)
|
writeLines(tmp, tmp_file)
|
||||||
|
expect_true(file.exists(tmp_file))
|
||||||
dtest4 <- xgb.DMatrix(paste(tmp_file, "?format=libsvm", sep = ""), silent = TRUE)
|
dtest4 <- xgb.DMatrix(paste(tmp_file, "?format=libsvm", sep = ""), silent = TRUE)
|
||||||
expect_equal(dim(dtest4), c(3, 4))
|
expect_equal(dim(dtest4), c(3, 4))
|
||||||
expect_equal(getinfo(dtest4, 'label'), c(0, 1, 0))
|
expect_equal(getinfo(dtest4, 'label'), c(0, 1, 0))
|
||||||
|
|||||||
@ -189,7 +189,7 @@ test_that("SHAPs sum to predictions, with or without DART", {
|
|||||||
tol <- 1e-5
|
tol <- 1e-5
|
||||||
|
|
||||||
expect_equal(rowSums(shap), pred, tol = tol)
|
expect_equal(rowSums(shap), pred, tol = tol)
|
||||||
expect_equal(apply(shapi, 1, sum), pred, tol = tol)
|
expect_equal(rowSums(shapi), pred, tol = tol)
|
||||||
for (i in seq_len(nrow(d)))
|
for (i in seq_len(nrow(d)))
|
||||||
for (f in list(rowSums, colSums))
|
for (f in list(rowSums, colSums))
|
||||||
expect_equal(f(shapi[i, , ]), shap[i, ], tol = tol)
|
expect_equal(f(shapi[i, , ]), shap[i, ], tol = tol)
|
||||||
|
|||||||
@ -76,8 +76,6 @@ test_that("Models from previous versions of XGBoost can be loaded", {
|
|||||||
name <- m[3]
|
name <- m[3]
|
||||||
is_rds <- endsWith(model_file, '.rds')
|
is_rds <- endsWith(model_file, '.rds')
|
||||||
is_json <- endsWith(model_file, '.json')
|
is_json <- endsWith(model_file, '.json')
|
||||||
|
|
||||||
cpp_warning <- capture.output({
|
|
||||||
# Expect an R warning when a model is loaded from RDS and it was generated by version < 1.1.x
|
# Expect an R warning when a model is loaded from RDS and it was generated by version < 1.1.x
|
||||||
if (is_rds && compareVersion(model_xgb_ver, '1.1.1.1') < 0) {
|
if (is_rds && compareVersion(model_xgb_ver, '1.1.1.1') < 0) {
|
||||||
booster <- readRDS(model_file)
|
booster <- readRDS(model_file)
|
||||||
@ -94,14 +92,4 @@ test_that("Models from previous versions of XGBoost can be loaded", {
|
|||||||
run_booster_check(booster, name)
|
run_booster_check(booster, name)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
cpp_warning <- paste0(cpp_warning, collapse = ' ')
|
|
||||||
if (is_rds && compareVersion(model_xgb_ver, '1.1.1.1') >= 0) {
|
|
||||||
# Expect a C++ warning when a model is loaded from RDS and it was generated by old XGBoost`
|
|
||||||
m <- grepl(paste0('.*If you are loading a serialized model ',
|
|
||||||
'\\(like pickle in Python, RDS in R\\).*',
|
|
||||||
'for more details about differences between ',
|
|
||||||
'saving model and serializing.*'), cpp_warning, perl = TRUE)
|
|
||||||
expect_true(length(m) > 0 && all(m))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
})
|
})
|
||||||
|
|||||||
21
R-package/tests/testthat/test_unicode.R
Normal file
21
R-package/tests/testthat/test_unicode.R
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
context("Test Unicode handling")
|
||||||
|
|
||||||
|
data(agaricus.train, package = 'xgboost')
|
||||||
|
data(agaricus.test, package = 'xgboost')
|
||||||
|
train <- agaricus.train
|
||||||
|
test <- agaricus.test
|
||||||
|
set.seed(1994)
|
||||||
|
|
||||||
|
test_that("Can save and load models with Unicode paths", {
|
||||||
|
nrounds <- 2
|
||||||
|
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
|
||||||
|
eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic",
|
||||||
|
eval_metric = "error")
|
||||||
|
tmpdir <- tempdir()
|
||||||
|
lapply(c("모델.json", "がうる・ぐら.json", "类继承.ubj"), function(x) {
|
||||||
|
path <- file.path(tmpdir, x)
|
||||||
|
xgb.save(bst, path)
|
||||||
|
bst2 <- xgb.load(path)
|
||||||
|
expect_equal(predict(bst, test$data), predict(bst2, test$data))
|
||||||
|
})
|
||||||
|
})
|
||||||
@ -13,7 +13,10 @@ test_that("updating the model works", {
|
|||||||
watchlist <- list(train = dtrain, test = dtest)
|
watchlist <- list(train = dtrain, test = dtest)
|
||||||
|
|
||||||
# no-subsampling
|
# no-subsampling
|
||||||
p1 <- list(objective = "binary:logistic", max_depth = 2, eta = 0.05, nthread = 2)
|
p1 <- list(
|
||||||
|
objective = "binary:logistic", max_depth = 2, eta = 0.05, nthread = 2,
|
||||||
|
updater = "grow_colmaker,prune"
|
||||||
|
)
|
||||||
set.seed(11)
|
set.seed(11)
|
||||||
bst1 <- xgb.train(p1, dtrain, nrounds = 10, watchlist, verbose = 0)
|
bst1 <- xgb.train(p1, dtrain, nrounds = 10, watchlist, verbose = 0)
|
||||||
tr1 <- xgb.model.dt.tree(model = bst1)
|
tr1 <- xgb.model.dt.tree(model = bst1)
|
||||||
|
|||||||
@ -51,24 +51,24 @@ A *categorical* variable has a fixed number of different values. For instance, i
|
|||||||
>
|
>
|
||||||
> Type `?factor` in the console for more information.
|
> Type `?factor` in the console for more information.
|
||||||
|
|
||||||
To answer the question above we will convert *categorical* variables to `numeric` one.
|
To answer the question above we will convert *categorical* variables to `numeric` ones.
|
||||||
|
|
||||||
### Conversion from categorical to numeric variables
|
### Conversion from categorical to numeric variables
|
||||||
|
|
||||||
#### Looking at the raw data
|
#### Looking at the raw data
|
||||||
|
|
||||||
In this Vignette we will see how to transform a *dense* `data.frame` (*dense* = few zeroes in the matrix) with *categorical* variables to a very *sparse* matrix (*sparse* = lots of zero in the matrix) of `numeric` features.
|
In this Vignette we will see how to transform a *dense* `data.frame` (*dense* = the majority of the matrix is non-zero) with *categorical* variables to a very *sparse* matrix (*sparse* = lots of zero entries in the matrix) of `numeric` features.
|
||||||
|
|
||||||
The method we are going to see is usually called [one-hot encoding](https://en.wikipedia.org/wiki/One-hot).
|
The method we are going to see is usually called [one-hot encoding](https://en.wikipedia.org/wiki/One-hot).
|
||||||
|
|
||||||
The first step is to load `Arthritis` dataset in memory and wrap it with `data.table` package.
|
The first step is to load the `Arthritis` dataset in memory and wrap it with the `data.table` package.
|
||||||
|
|
||||||
```{r, results='hide'}
|
```{r, results='hide'}
|
||||||
data(Arthritis)
|
data(Arthritis)
|
||||||
df <- data.table(Arthritis, keep.rownames = FALSE)
|
df <- data.table(Arthritis, keep.rownames = FALSE)
|
||||||
```
|
```
|
||||||
|
|
||||||
> `data.table` is 100% compliant with **R** `data.frame` but its syntax is more consistent and its performance for large dataset is [best in class](https://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly) (`dplyr` from **R** and `Pandas` from **Python** [included](https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping)). Some parts of **XGBoost** **R** package use `data.table`.
|
> `data.table` is 100% compliant with **R** `data.frame` but its syntax is more consistent and its performance for large dataset is [best in class](https://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly) (`dplyr` from **R** and `Pandas` from **Python** [included](https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping)). Some parts of **XGBoost's** **R** package use `data.table`.
|
||||||
|
|
||||||
The first thing we want to do is to have a look to the first few lines of the `data.table`:
|
The first thing we want to do is to have a look to the first few lines of the `data.table`:
|
||||||
|
|
||||||
@ -95,19 +95,19 @@ We will add some new *categorical* features to see if it helps.
|
|||||||
|
|
||||||
##### Grouping per 10 years
|
##### Grouping per 10 years
|
||||||
|
|
||||||
For the first feature we create groups of age by rounding the real age.
|
For the first features we create groups of age by rounding the real age.
|
||||||
|
|
||||||
Note that we transform it to `factor` so the algorithm treat these age groups as independent values.
|
Note that we transform it to `factor` so the algorithm treats these age groups as independent values.
|
||||||
|
|
||||||
Therefore, 20 is not closer to 30 than 60. To make it short, the distance between ages is lost in this transformation.
|
Therefore, 20 is not closer to 30 than 60. In other words, the distance between ages is lost in this transformation.
|
||||||
|
|
||||||
```{r}
|
```{r}
|
||||||
head(df[, AgeDiscret := as.factor(round(Age / 10, 0))])
|
head(df[, AgeDiscret := as.factor(round(Age / 10, 0))])
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Random split into two groups
|
##### Randomly split into two groups
|
||||||
|
|
||||||
Following is an even stronger simplification of the real age with an arbitrary split at 30 years old. We choose this value **based on nothing**. We will see later if simplifying the information based on arbitrary values is a good strategy (you may already have an idea of how well it will work...).
|
The following is an even stronger simplification of the real age with an arbitrary split at 30 years old. I choose this value **based on nothing**. We will see later if simplifying the information based on arbitrary values is a good strategy (you may already have an idea of how well it will work...).
|
||||||
|
|
||||||
```{r}
|
```{r}
|
||||||
head(df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))])
|
head(df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))])
|
||||||
@ -119,7 +119,7 @@ These new features are highly correlated to the `Age` feature because they are s
|
|||||||
|
|
||||||
For many machine learning algorithms, using correlated features is not a good idea. It may sometimes make prediction less accurate, and most of the time make interpretation of the model almost impossible. GLM, for instance, assumes that the features are uncorrelated.
|
For many machine learning algorithms, using correlated features is not a good idea. It may sometimes make prediction less accurate, and most of the time make interpretation of the model almost impossible. GLM, for instance, assumes that the features are uncorrelated.
|
||||||
|
|
||||||
Fortunately, decision tree algorithms (including boosted trees) are very robust to these features. Therefore we have nothing to do to manage this situation.
|
Fortunately, decision tree algorithms (including boosted trees) are very robust to these features. Therefore we don't have to do anything to manage this situation.
|
||||||
|
|
||||||
##### Cleaning data
|
##### Cleaning data
|
||||||
|
|
||||||
@ -144,7 +144,7 @@ We will use the [dummy contrast coding](https://stats.oarc.ucla.edu/r/library/r-
|
|||||||
|
|
||||||
The purpose is to transform each value of each *categorical* feature into a *binary* feature `{0, 1}`.
|
The purpose is to transform each value of each *categorical* feature into a *binary* feature `{0, 1}`.
|
||||||
|
|
||||||
For example, the column `Treatment` will be replaced by two columns, `TreatmentPlacebo`, and `TreatmentTreated`. Each of them will be *binary*. Therefore, an observation which has the value `Placebo` in column `Treatment` before the transformation will have after the transformation the value `1` in the new column `TreatmentPlacebo` and the value `0` in the new column `TreatmentTreated`. The column `TreatmentPlacebo` will disappear during the contrast encoding, as it would be absorbed into a common constant intercept column.
|
For example, the column `Treatment` will be replaced by two columns, `TreatmentPlacebo`, and `TreatmentTreated`. Each of them will be *binary*. Therefore, an observation which has the value `Placebo` in column `Treatment` before the transformation will have the value `1` in the new column `TreatmentPlacebo` and the value `0` in the new column `TreatmentTreated` after the transformation. The column `TreatmentPlacebo` will disappear during the contrast encoding, as it would be absorbed into a common constant intercept column.
|
||||||
|
|
||||||
Column `Improved` is excluded because it will be our `label` column, the one we want to predict.
|
Column `Improved` is excluded because it will be our `label` column, the one we want to predict.
|
||||||
|
|
||||||
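Not part of the diff itself, but as a hedged sketch of the dummy-contrast encoding described above (the exact preparation calls live elsewhere in the vignette; the `Arthritis` column names are real, the rest is illustrative):

```r
library(Matrix)
library(data.table)
data(Arthritis, package = "vcd")

df <- data.table(Arthritis, keep.rownames = FALSE)
df[, ID := NULL]  # drop the patient identifier before encoding

# One binary column per factor level, with the intercept column removed;
# e.g. Treatment becomes TreatmentTreated once TreatmentPlacebo is absorbed.
sparse_matrix <- sparse.model.matrix(Improved ~ ., data = df)[, -1]
output_vector <- as.numeric(df$Improved == "Marked")
head(sparse_matrix)
```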
@ -176,13 +176,9 @@ bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 4,
|
|||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
You can see some `train-error: 0.XXXXX` lines followed by a number. It decreases. Each line shows how well the model explains your data. Lower is better.
|
You can see some `train-logloss: 0.XXXXX` lines followed by a number. It decreases. Each line shows how well the model explains the data. Lower is better.
|
||||||
|
|
||||||
A small value for training error may be a symptom of [overfitting](https://en.wikipedia.org/wiki/Overfitting), meaning the model will not accurately predict the future values.
|
A small value for training error may be a symptom of [overfitting](https://en.wikipedia.org/wiki/Overfitting), meaning the model will not accurately predict unseen values.
|
||||||
|
|
||||||
> Here you can see the numbers decrease until line 7 and then increase.
|
|
||||||
>
|
|
||||||
> It probably means we are overfitting. To fix that I should reduce the number of rounds to `nrounds = 4`. I will let things like that because I don't really care for the purpose of this example :-)
|
|
||||||
|
|
||||||
Feature importance
|
Feature importance
|
||||||
------------------
|
------------------
|
||||||
@ -199,64 +195,35 @@ importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bs
|
|||||||
head(importance)
|
head(importance)
|
||||||
```
|
```
|
||||||
|
|
||||||
> The column `Gain` provide the information we are looking for.
|
> The column `Gain` provides the information we are looking for.
|
||||||
>
|
>
|
||||||
> As you can see, features are classified by `Gain`.
|
> As you can see, features are classified by `Gain`.
|
||||||
|
|
||||||
`Gain` is the improvement in accuracy brought by a feature to the branches it is on. The idea is that before adding a new split on a feature X to the branch there was some wrongly classified elements, after adding the split on this feature, there are two new branches, and each of these branch is more accurate (one branch saying if your observation is on this branch then it should be classified as `1`, and the other branch saying the exact opposite).
|
`Gain` is the improvement in accuracy brought by a feature to the branches it is on. The idea is that before adding a new split on a feature X to the branch there were some wrongly classified elements; after adding the split on this feature, there are two new branches, and each of these branches is more accurate (one branch saying if your observation is on this branch then it should be classified as `1`, and the other branch saying the exact opposite).
|
||||||
|
|
||||||
`Cover` measures the relative quantity of observations concerned by a feature.
|
`Cover` is related to the second order derivative (or Hessian) of the loss function with respect to a particular variable; thus, a large value indicates a variable has a large potential impact on the loss function and so is important.
|
||||||
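For readers who want the quantities behind `Gain` and `Cover` (background, not spelled out in the vignette): the gain of a candidate split is the regularised loss reduction

$$\mathrm{Gain} = \frac{1}{2}\left[\frac{G_L^2}{H_L+\lambda} + \frac{G_R^2}{H_R+\lambda} - \frac{(G_L+G_R)^2}{H_L+H_R+\lambda}\right] - \gamma,$$

where $G$ and $H$ are the sums of first- and second-order gradients of the observations falling into the left and right children, $\lambda$ is the L2 penalty on leaf weights and $\gamma$ the per-split penalty. The cover of a node is the corresponding sum of hessians; for binary logistic loss each observation contributes $h_i = p_i(1-p_i)$, which is why a feature that splits many high-uncertainty observations receives a large `Cover`.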
|
|
||||||
`Frequency` is a simpler way to measure the `Gain`. It just counts the number of times a feature is used in all generated trees. You should not use it (unless you know why you want to use it).
|
`Frequency` is a simpler way to measure the `Gain`. It just counts the number of times a feature is used in all generated trees. You should not use it (unless you know why you want to use it).
|
||||||
|
|
||||||
#### Improvement in the interpretability of feature importance data.table
|
|
||||||
|
|
||||||
We can go deeper in the analysis of the model. In the `data.table` above, we have discovered which features counts to predict if the illness will go or not. But we don't yet know the role of these features. For instance, one of the question we may want to answer would be: does receiving a placebo treatment helps to recover from the illness?
|
|
||||||
|
|
||||||
One simple solution is to count the co-occurrences of a feature and a class of the classification.
|
|
||||||
|
|
||||||
For that purpose we will execute the same function as above but using two more parameters, `data` and `label`.
|
|
||||||
|
|
||||||
```{r}
|
|
||||||
importanceRaw <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst, data = sparse_matrix, label = output_vector)
|
|
||||||
|
|
||||||
# Cleaning for better display
|
|
||||||
importanceClean <- importanceRaw[, `:=`(Cover = NULL, Frequency = NULL)]
|
|
||||||
|
|
||||||
head(importanceClean)
|
|
||||||
```
|
|
||||||
|
|
||||||
> In the table above we have removed two not needed columns and select only the first lines.
|
|
||||||
|
|
||||||
First thing you notice is the new column `Split`. It is the split applied to the feature on a branch of one of the tree. Each split is present, therefore a feature can appear several times in this table. Here we can see the feature `Age` is used several times with different splits.
|
|
||||||
|
|
||||||
How the split is applied to count the co-occurrences? It is always `<`. For instance, in the second line, we measure the number of persons under 61.5 years with the illness gone after the treatment.
|
|
||||||
|
|
||||||
The two other new columns are `RealCover` and `RealCover %`. In the first column it measures the number of observations in the dataset where the split is respected and the label marked as `1`. The second column is the percentage of the whole population that `RealCover` represents.
|
|
||||||
|
|
||||||
Therefore, according to our findings, getting a placebo doesn't seem to help but being younger than 61 years may help (seems logic).
|
|
||||||
|
|
||||||
> You may wonder how to interpret the `< 1.00001` on the first line. Basically, in a sparse `Matrix`, there is no `0`, therefore, looking for one hot-encoded categorical observations validating the rule `< 1.00001` is like just looking for `1` for this feature.
|
|
||||||
|
|
||||||
### Plotting the feature importance
|
### Plotting the feature importance
|
||||||
|
|
||||||
|
|
||||||
All these things are nice, but it would be even better to plot the results.
|
All these things are nice, but it would be even better to plot the results.
|
||||||
|
|
||||||
```{r, fig.width=8, fig.height=5, fig.align='center'}
|
```{r, fig.width=8, fig.height=5, fig.align='center'}
|
||||||
xgb.plot.importance(importance_matrix = importance)
|
xgb.plot.importance(importance_matrix = importance)
|
||||||
```
|
```
|
||||||
|
|
||||||
Feature have automatically been divided in 2 clusters: the interesting features... and the others.
|
Running this line of code, you should get a bar chart showing the importance of the 6 features (containing the same data as the output we saw earlier, but displaying it visually for easier consumption). Note that `xgb.ggplot.importance` is also available for all the ggplot2 fans!
|
||||||
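A one-line sketch of the `ggplot2` variant mentioned above (assuming `ggplot2` is installed and the preceding chunks have been run so that `importance` exists; the returned object can be customised with the usual layers):

```r
library(ggplot2)
gg <- xgb.ggplot.importance(importance_matrix = importance)
gg + ggtitle("Feature importance (Gain)")
```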
|
|
||||||
> Depending on the dataset and the learning parameters, you may have more than two clusters. The default is to limit them to `10`, but you can increase this limit. Look at the function documentation for more information.
|
> Depending on the dataset and the learning parameters, you may have more than two clusters. The default is to limit them to `10`, but you can increase this limit. Look at the function documentation for more information.
|
||||||
|
|
||||||
According to the plot above, the most important features in this dataset to predict if the treatment will work are :
|
According to the plot above, the most important features in this dataset to predict if the treatment will work are :
|
||||||
|
|
||||||
* the Age ;
|
* An individual's age;
|
||||||
* having received a placebo or not ;
|
* Having received a placebo or not;
|
||||||
* the sex is third but already included in the not interesting features group ;
|
* Gender;
|
||||||
* then we see our generated features (AgeDiscret). We can see that their contribution is very low.
|
* Our generated feature AgeDiscret. We can see that its contribution is very low.
|
||||||
|
|
||||||
|
|
||||||
### Do these results make sense?
|
### Do these results make sense?
|
||||||
|
|
||||||
@ -270,53 +237,53 @@ c2 <- chisq.test(df$Age, output_vector)
|
|||||||
print(c2)
|
print(c2)
|
||||||
```
|
```
|
||||||
|
|
||||||
Pearson correlation between Age and illness disappearing is **`r round(c2$statistic, 2 )`**.
|
The Pearson correlation between Age and illness disappearing is **`r round(c2$statistic, 2 )`**.
|
||||||
|
|
||||||
```{r, warning=FALSE, message=FALSE}
|
```{r, warning=FALSE, message=FALSE}
|
||||||
c2 <- chisq.test(df$AgeDiscret, output_vector)
|
c2 <- chisq.test(df$AgeDiscret, output_vector)
|
||||||
print(c2)
|
print(c2)
|
||||||
```
|
```
|
||||||
|
|
||||||
Our first simplification of Age gives a Pearson correlation is **`r round(c2$statistic, 2)`**.
|
Our first simplification of Age gives a Pearson correlation of **`r round(c2$statistic, 2)`**.
|
||||||
|
|
||||||
```{r, warning=FALSE, message=FALSE}
|
```{r, warning=FALSE, message=FALSE}
|
||||||
c2 <- chisq.test(df$AgeCat, output_vector)
|
c2 <- chisq.test(df$AgeCat, output_vector)
|
||||||
print(c2)
|
print(c2)
|
||||||
```
|
```
|
||||||
|
|
||||||
The perfectly random split I did between young and old at 30 years old have a low correlation of **`r round(c2$statistic, 2)`**. It's a result we may expect as may be in my mind > 30 years is being old (I am 32 and starting feeling old, this may explain that), but for the illness we are studying, the age to be vulnerable is not the same.
|
The perfectly random split we did between young and old at 30 years old has a low correlation of **2.36**. This suggests that, for the particular illness we are studying, the age at which someone is vulnerable to this disease is likely very different from 30.
|
||||||
|
|
||||||
Morality: don't let your *gut* lower the quality of your model.
|
Moral of the story: don't let your *gut* lower the quality of your model.
|
||||||
|
|
||||||
In *data science* expression, there is the word *science* :-)
|
In *data science*, there is the word *science* :-)
|
||||||
|
|
||||||
Conclusion
|
Conclusion
|
||||||
----------
|
----------
|
||||||
|
|
||||||
As you can see, in general *destroying information by simplifying it won't improve your model*. **Chi2** just demonstrates that.
|
As you can see, in general *destroying information by simplifying it won't improve your model*. **Chi2** just demonstrates that.
|
||||||
|
|
||||||
But in more complex cases, creating a new feature based on existing one which makes link with the outcome more obvious may help the algorithm and improve the model.
|
But in more complex cases, creating a new feature from an existing one may help the algorithm and improve the model.
|
||||||
|
|
||||||
The case studied here is not enough complex to show that. Check [Kaggle website](http://www.kaggle.com/) for some challenging datasets. However it's almost always worse when you add some arbitrary rules.
|
The case studied here is not complex enough to show that. Check [Kaggle website](https://www.kaggle.com/) for some challenging datasets.
|
||||||
|
|
||||||
Moreover, you can notice that even if we have added some not useful new features highly correlated with other features, the boosting tree algorithm have been able to choose the best one, which in this case is the Age.
|
Moreover, you can see that even if we have added some new features which are not very useful/highly correlated with other features, the boosting tree algorithm was still able to choose the best one (which in this case is the Age).
|
||||||
|
|
||||||
Linear model may not be that smart in this scenario.
|
Linear models may not perform as well.
|
||||||
|
|
||||||
Special Note: What about Random Forests™?
|
Special Note: What about Random Forests™?
|
||||||
-----------------------------------------
|
-----------------------------------------
|
||||||
|
|
||||||
As you may know, [Random Forests](https://en.wikipedia.org/wiki/Random_forest) algorithm is cousin with boosting and both are part of the [ensemble learning](https://en.wikipedia.org/wiki/Ensemble_learning) family.
|
As you may know, the [Random Forests](https://en.wikipedia.org/wiki/Random_forest) algorithm is cousin with boosting and both are part of the [ensemble learning](https://en.wikipedia.org/wiki/Ensemble_learning) family.
|
||||||
|
|
||||||
Both trains several decision trees for one dataset. The *main* difference is that in Random Forests, trees are independent and in boosting, the tree `N+1` focus its learning on the loss (<=> what has not been well modeled by the tree `N`).
|
Both train several decision trees for one dataset. The *main* difference is that in Random Forests, trees are independent and in boosting, the `N+1`-st tree focuses its learning on the loss (<=> what has not been well modeled by the tree `N`).
|
||||||
|
|
||||||
This difference have an impact on a corner case in feature importance analysis: the *correlated features*.
|
This difference can have an impact on an edge case in feature importance analysis: *correlated features*.
|
||||||
|
|
||||||
Imagine two features perfectly correlated, feature `A` and feature `B`. For one specific tree, if the algorithm needs one of them, it will choose randomly (true in both boosting and Random Forests).
|
Imagine two features perfectly correlated, feature `A` and feature `B`. For one specific tree, if the algorithm needs one of them, it will choose randomly (true in both boosting and Random Forests).
|
||||||
|
|
||||||
However, in Random Forests this random choice will be done for each tree, because each tree is independent from the others. Therefore, approximatively, depending of your parameters, 50% of the trees will choose feature `A` and the other 50% will choose feature `B`. So the *importance* of the information contained in `A` and `B` (which is the same, because they are perfectly correlated) is diluted in `A` and `B`. So you won't easily know this information is important to predict what you want to predict! It is even worse when you have 10 correlated features...
|
However, in Random Forests this random choice will be done for each tree, because each tree is independent from the others. Therefore, approximately (and depending on your parameters) 50% of the trees will choose feature `A` and the other 50% will choose feature `B`. So the *importance* of the information contained in `A` and `B` (which is the same, because they are perfectly correlated) is diluted in `A` and `B`. So you won't easily know this information is important to predict what you want to predict! It is even worse when you have 10 correlated features...
|
||||||
|
|
||||||
In boosting, when a specific link between feature and outcome have been learned by the algorithm, it will try to not refocus on it (in theory it is what happens, reality is not always that simple). Therefore, all the importance will be on feature `A` or on feature `B` (but not both). You will know that one feature have an important role in the link between the observations and the label. It is still up to you to search for the correlated features to the one detected as important if you need to know all of them.
|
In boosting, when a specific link between feature and outcome has been learned by the algorithm, it will try not to refocus on it (in theory it is what happens, reality is not always that simple). Therefore, all the importance will be on feature `A` or on feature `B` (but not both). You will know that one feature has an important role in the link between the observations and the label. It is still up to you to search for the correlated features to the one detected as important if you need to know all of them.
|
||||||
|
|
||||||
If you want to try Random Forests algorithm, you can tweak XGBoost parameters!
|
If you want to try Random Forests algorithm, you can tweak XGBoost parameters!
|
||||||
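The parameter tweaks alluded to above amount to growing many de-correlated trees in a single boosting round; a hedged sketch with illustrative, untuned values:

```r
library(xgboost)
data(agaricus.train, package = "xgboost")

rf_like <- xgboost(
  data = agaricus.train$data,
  label = agaricus.train$label,
  nrounds = 1,                # a single round ...
  num_parallel_tree = 100,    # ... that grows a whole forest at once
  subsample = 0.8,            # row subsampling per tree
  colsample_bynode = 0.8,     # column subsampling per split
  eta = 1,                    # no shrinkage, as in a plain random forest
  max_depth = 6,
  nthread = 2,
  objective = "binary:logistic"
)
```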
|
|
||||||
|
|||||||
@ -18,13 +18,11 @@
|
|||||||
publisher={Institute of Mathematical Statistics}
|
publisher={Institute of Mathematical Statistics}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@misc{
|
@misc{
|
||||||
Bache+Lichman:2013 ,
|
Bache+Lichman:2013 ,
|
||||||
author = "K. Bache and M. Lichman",
|
author = "K. Bache and M. Lichman",
|
||||||
year = "2013",
|
year = "2013",
|
||||||
title = "{UCI} Machine Learning Repository",
|
title = "{UCI} Machine Learning Repository",
|
||||||
url = "http://archive.ics.uci.edu/ml/",
|
url = "https://archive.ics.uci.edu/",
|
||||||
institution = "University of California, Irvine, School of Information and Computer Sciences"
|
institution = "University of California, Irvine, School of Information and Computer Sciences"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -48,7 +48,6 @@ Become a sponsor and get a logo here. See details at [Sponsoring the XGBoost Pro
|
|||||||
|
|
||||||
<a href="https://www.nvidia.com/en-us/" target="_blank"><img src="https://raw.githubusercontent.com/xgboost-ai/xgboost-ai.github.io/master/images/sponsors/nvidia.jpg" alt="NVIDIA" width="72" height="72"></a>
|
<a href="https://www.nvidia.com/en-us/" target="_blank"><img src="https://raw.githubusercontent.com/xgboost-ai/xgboost-ai.github.io/master/images/sponsors/nvidia.jpg" alt="NVIDIA" width="72" height="72"></a>
|
||||||
<a href="https://www.intel.com/" target="_blank"><img src="https://images.opencollective.com/intel-corporation/2fa85c1/logo/256.png" width="72" height="72"></a>
|
<a href="https://www.intel.com/" target="_blank"><img src="https://images.opencollective.com/intel-corporation/2fa85c1/logo/256.png" width="72" height="72"></a>
|
||||||
<a href="https://getkoffie.com/?utm_source=opencollective&utm_medium=github&utm_campaign=xgboost" target="_blank"><img src="https://images.opencollective.com/koffielabs/f391ab8/logo/256.png" width="72" height="72"></a>
|
|
||||||
|
|
||||||
### Backers
|
### Backers
|
||||||
[[Become a backer](https://opencollective.com/xgboost#backer)]
|
[[Become a backer](https://opencollective.com/xgboost#backer)]
|
||||||
|
|||||||
@ -90,8 +90,8 @@ function(format_gencode_flags flags out)
   endif()
   # Set up architecture flags
   if(NOT flags)
-    if (CUDA_VERSION VERSION_GREATER_EQUAL "11.1")
-      set(flags "50;60;70;80")
+    if (CUDA_VERSION VERSION_GREATER_EQUAL "11.8")
+      set(flags "50;60;70;80;90")
     elseif (CUDA_VERSION VERSION_GREATER_EQUAL "11.0")
       set(flags "50;60;70;80")
     elseif(CUDA_VERSION VERSION_GREATER_EQUAL "10.0")
@ -133,6 +133,11 @@ function(xgboost_set_cuda_flags target)
     $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=${OpenMP_CXX_FLAGS}>
     $<$<COMPILE_LANGUAGE:CUDA>:-Xfatbin=-compress-all>)

+  if (USE_PER_THREAD_DEFAULT_STREAM)
+    target_compile_options(${target} PRIVATE
+      $<$<COMPILE_LANGUAGE:CUDA>:--default-stream per-thread>)
+  endif (USE_PER_THREAD_DEFAULT_STREAM)
+
   if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.18")
     set_property(TARGET ${target} PROPERTY CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES})
   endif (CMAKE_VERSION VERSION_GREATER_EQUAL "3.18")
@ -172,7 +177,8 @@ function(xgboost_set_cuda_flags target)
   set_target_properties(${target} PROPERTIES
     CUDA_STANDARD 17
     CUDA_STANDARD_REQUIRED ON
-    CUDA_SEPARABLE_COMPILATION OFF)
+    CUDA_SEPARABLE_COMPILATION OFF
+    CUDA_RUNTIME_LIBRARY Static)
 endfunction(xgboost_set_cuda_flags)

 # Set HIP related flags to target.
@ -295,6 +301,7 @@ macro(xgboost_target_link_libraries target)

   if (USE_CUDA)
     xgboost_set_cuda_flags(${target})
+    target_link_libraries(${target} PUBLIC CUDA::cudart_static)
   endif (USE_CUDA)

   if (USE_HIP)
@ -313,10 +320,6 @@ macro(xgboost_target_link_libraries target)
     target_link_libraries(${target} PRIVATE CUDA::nvToolsExt)
   endif (USE_NVTX)

-  if (RABIT_BUILD_MPI)
-    target_link_libraries(${target} PRIVATE MPI::MPI_CXX)
-  endif (RABIT_BUILD_MPI)
-
   if (MINGW)
     target_link_libraries(${target} PRIVATE wsock32 ws2_32)
   endif (MINGW)
@ -52,11 +52,11 @@ endif (BUILD_WITH_SHARED_NCCL)

 find_path(NCCL_INCLUDE_DIR
   NAMES nccl.h
-  PATHS $ENV{NCCL_ROOT}/include ${NCCL_ROOT}/include)
+  HINTS ${NCCL_ROOT}/include $ENV{NCCL_ROOT}/include)

 find_library(NCCL_LIBRARY
   NAMES ${NCCL_LIB_NAME}
-  PATHS $ENV{NCCL_ROOT}/lib/ ${NCCL_ROOT}/lib)
+  HINTS ${NCCL_ROOT}/lib $ENV{NCCL_ROOT}/lib/)

 message(STATUS "Using nccl library: ${NCCL_LIBRARY}")

demo/CLI/README.rst (new file)
@ -0,0 +1,4 @@
+XGBoost Command Line Interface Walkthrough
+==========================================
+
+Please note that the command line interface is deprecated in 2.1.0, use other language bindings instead. For a list of available bindings, see https://xgboost.readthedocs.io/en/stable/
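For readers migrating away from the deprecated CLI, a minimal hedged sketch of the equivalent Python workflow follows; the data path mirrors the demo layout and is an illustrative assumption.

```python
import xgboost as xgb

# Rough Python equivalent of a minimal CLI training run: load LIBSVM-format
# data, train a small binary classifier, and save the model.
dtrain = xgb.DMatrix("demo/data/agaricus.txt.train?format=libsvm")
params = {"objective": "binary:logistic", "max_depth": 2, "eta": 1.0}
booster = xgb.train(params, dtrain, num_boost_round=10)
booster.save_model("model.json")
```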
@ -106,7 +106,7 @@ Please send pull requests if you find ones that are missing here.
 - Prarthana Bhat, 2nd place winner in [DYD Competition](https://datahack.analyticsvidhya.com/contest/date-your-data/). Link to [Solution](https://github.com/analyticsvidhya/DateYourData/blob/master/Prathna_Bhat_Model.R).

 ## Talks
-- [XGBoost: A Scalable Tree Boosting System](http://datascience.la/xgboost-workshop-and-meetup-talk-with-tianqi-chen/) (video+slides) by Tianqi Chen at the Los Angeles Data Science meetup
+- XGBoost: A Scalable Tree Boosting System ([video] (https://www.youtube.com/watch?v=Vly8xGnNiWs) + [slides](https://speakerdeck.com/datasciencela/tianqi-chen-xgboost-overview-and-latest-news-la-meetup-talk)) by Tianqi Chen at the Los Angeles Data Science meetup

 ## Tutorials

@ -11,33 +11,43 @@ import numpy as np

 import xgboost as xgb

-plt.rcParams.update({'font.size': 13})
+plt.rcParams.update({"font.size": 13})


 # Function to visualize censored labels
-def plot_censored_labels(X, y_lower, y_upper):
-    def replace_inf(x, target_value):
+def plot_censored_labels(
+    X: np.ndarray, y_lower: np.ndarray, y_upper: np.ndarray
+) -> None:
+    def replace_inf(x: np.ndarray, target_value: float) -> np.ndarray:
         x[np.isinf(x)] = target_value
         return x
-    plt.plot(X, y_lower, 'o', label='y_lower', color='blue')
-    plt.plot(X, y_upper, 'o', label='y_upper', color='fuchsia')
-    plt.vlines(X, ymin=replace_inf(y_lower, 0.01), ymax=replace_inf(y_upper, 1000),
-               label='Range for y', color='gray')
+
+    plt.plot(X, y_lower, "o", label="y_lower", color="blue")
+    plt.plot(X, y_upper, "o", label="y_upper", color="fuchsia")
+    plt.vlines(
+        X,
+        ymin=replace_inf(y_lower, 0.01),
+        ymax=replace_inf(y_upper, 1000.0),
+        label="Range for y",
+        color="gray",
+    )


 # Toy data
 X = np.array([1, 2, 3, 4, 5]).reshape((-1, 1))
 INF = np.inf
-y_lower = np.array([ 10, 15, -INF, 30, 100])
+y_lower = np.array([10, 15, -INF, 30, 100])
 y_upper = np.array([INF, INF, 20, 50, INF])

 # Visualize toy data
 plt.figure(figsize=(5, 4))
 plot_censored_labels(X, y_lower, y_upper)
 plt.ylim((6, 200))
-plt.legend(loc='lower right')
-plt.title('Toy data')
-plt.xlabel('Input feature')
-plt.ylabel('Label')
-plt.yscale('log')
+plt.legend(loc="lower right")
+plt.title("Toy data")
+plt.xlabel("Input feature")
+plt.ylabel("Label")
+plt.yscale("log")
 plt.tight_layout()
 plt.show(block=True)

@ -46,54 +56,83 @@ grid_pts = np.linspace(0.8, 5.2, 1000).reshape((-1, 1))

 # Train AFT model using XGBoost
 dmat = xgb.DMatrix(X)
-dmat.set_float_info('label_lower_bound', y_lower)
-dmat.set_float_info('label_upper_bound', y_upper)
-params = {'max_depth': 3, 'objective':'survival:aft', 'min_child_weight': 0}
+dmat.set_float_info("label_lower_bound", y_lower)
+dmat.set_float_info("label_upper_bound", y_upper)
+params = {"max_depth": 3, "objective": "survival:aft", "min_child_weight": 0}

 accuracy_history = []
-def plot_intermediate_model_callback(env):
-    """Custom callback to plot intermediate models"""
-    # Compute y_pred = prediction using the intermediate model, at current boosting iteration
-    y_pred = env.model.predict(dmat)
-    # "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes
-    # the corresponding predicted label (y_pred)
-    acc = np.sum(np.logical_and(y_pred >= y_lower, y_pred <= y_upper)/len(X) * 100)
-    accuracy_history.append(acc)
-
-    # Plot ranged labels as well as predictions by the model
-    plt.subplot(5, 3, env.iteration + 1)
-    plot_censored_labels(X, y_lower, y_upper)
-    y_pred_grid_pts = env.model.predict(xgb.DMatrix(grid_pts))
-    plt.plot(grid_pts, y_pred_grid_pts, 'r-', label='XGBoost AFT model', linewidth=4)
-    plt.title('Iteration {}'.format(env.iteration), x=0.5, y=0.8)
-    plt.xlim((0.8, 5.2))
-    plt.ylim((1 if np.min(y_pred) < 6 else 6, 200))
-    plt.yscale('log')
-
-res = {}
-plt.figure(figsize=(12,13))
-bst = xgb.train(params, dmat, 15, [(dmat, 'train')], evals_result=res,
-                callbacks=[plot_intermediate_model_callback])
+
+
+class PlotIntermediateModel(xgb.callback.TrainingCallback):
+    """Custom callback to plot intermediate models."""
+
+    def __init__(self) -> None:
+        super().__init__()
+
+    def after_iteration(
+        self,
+        model: xgb.Booster,
+        epoch: int,
+        evals_log: xgb.callback.TrainingCallback.EvalsLog,
+    ) -> bool:
+        """Run after training is finished."""
+        # Compute y_pred = prediction using the intermediate model, at current boosting
+        # iteration
+        y_pred = model.predict(dmat)
+        # "Accuracy" = the number of data points whose ranged label (y_lower, y_upper)
+        # includes the corresponding predicted label (y_pred)
+        acc = np.sum(
+            np.logical_and(y_pred >= y_lower, y_pred <= y_upper) / len(X) * 100
+        )
+        accuracy_history.append(acc)
+
+        # Plot ranged labels as well as predictions by the model
+        plt.subplot(5, 3, epoch + 1)
+        plot_censored_labels(X, y_lower, y_upper)
+        y_pred_grid_pts = model.predict(xgb.DMatrix(grid_pts))
+        plt.plot(
+            grid_pts, y_pred_grid_pts, "r-", label="XGBoost AFT model", linewidth=4
+        )
+        plt.title("Iteration {}".format(epoch), x=0.5, y=0.8)
+        plt.xlim((0.8, 5.2))
+        plt.ylim((1 if np.min(y_pred) < 6 else 6, 200))
+        plt.yscale("log")
+        return False
+
+
+res: xgb.callback.TrainingCallback.EvalsLog = {}
+plt.figure(figsize=(12, 13))
+bst = xgb.train(
+    params,
+    dmat,
+    15,
+    [(dmat, "train")],
+    evals_result=res,
+    callbacks=[PlotIntermediateModel()],
+)
 plt.tight_layout()
-plt.legend(loc='lower center', ncol=4,
-           bbox_to_anchor=(0.5, 0),
-           bbox_transform=plt.gcf().transFigure)
+plt.legend(
+    loc="lower center",
+    ncol=4,
+    bbox_to_anchor=(0.5, 0),
+    bbox_transform=plt.gcf().transFigure,
+)
 plt.tight_layout()

 # Plot negative log likelihood over boosting iterations
-plt.figure(figsize=(8,3))
+plt.figure(figsize=(8, 3))
 plt.subplot(1, 2, 1)
-plt.plot(res['train']['aft-nloglik'], 'b-o', label='aft-nloglik')
-plt.xlabel('# Boosting Iterations')
-plt.legend(loc='best')
+plt.plot(res["train"]["aft-nloglik"], "b-o", label="aft-nloglik")
+plt.xlabel("# Boosting Iterations")
+plt.legend(loc="best")

 # Plot "accuracy" over boosting iterations
 # "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes
 # the corresponding predicted label (y_pred)
 plt.subplot(1, 2, 2)
-plt.plot(accuracy_history, 'r-o', label='Accuracy (%)')
-plt.xlabel('# Boosting Iterations')
-plt.legend(loc='best')
+plt.plot(accuracy_history, "r-o", label="Accuracy (%)")
+plt.xlabel("# Boosting Iterations")
+plt.legend(loc="best")
 plt.tight_layout()

 plt.show()

@ -53,15 +53,7 @@ int main() {
   // configure the training
   // available parameters are described here:
   //   https://xgboost.readthedocs.io/en/latest/parameter.html
-  safe_xgboost(XGBoosterSetParam(booster, "tree_method", use_gpu ? "gpu_hist" : "hist"));
-  if (use_gpu) {
-    // set the GPU to use;
-    // this is not necessary, but provided here as an illustration
-    safe_xgboost(XGBoosterSetParam(booster, "gpu_id", "0"));
-  } else {
-    // avoid evaluating objective and metric on a GPU
-    safe_xgboost(XGBoosterSetParam(booster, "gpu_id", "-1"));
-  }
+  safe_xgboost(XGBoosterSetParam(booster, "device", use_gpu ? "cuda" : "cpu"));

   safe_xgboost(XGBoosterSetParam(booster, "objective", "binary:logistic"));
   safe_xgboost(XGBoosterSetParam(booster, "min_child_weight", "1"));

@ -18,43 +18,45 @@ def main(client):
     # The Veterans' Administration Lung Cancer Trial
     # The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980)
     CURRENT_DIR = os.path.dirname(__file__)
-    df = dd.read_csv(os.path.join(CURRENT_DIR, os.pardir, 'data', 'veterans_lung_cancer.csv'))
+    df = dd.read_csv(
+        os.path.join(CURRENT_DIR, os.pardir, "data", "veterans_lung_cancer.csv")
+    )

     # DaskDMatrix acts like normal DMatrix, works as a proxy for local
     # DMatrix scatter around workers.
     # For AFT survival, you'd need to extract the lower and upper bounds for the label
     # and pass them as arguments to DaskDMatrix.
-    y_lower_bound = df['Survival_label_lower_bound']
-    y_upper_bound = df['Survival_label_upper_bound']
-    X = df.drop(['Survival_label_lower_bound',
-                 'Survival_label_upper_bound'], axis=1)
-    dtrain = DaskDMatrix(client, X, label_lower_bound=y_lower_bound,
-                         label_upper_bound=y_upper_bound)
+    y_lower_bound = df["Survival_label_lower_bound"]
+    y_upper_bound = df["Survival_label_upper_bound"]
+    X = df.drop(["Survival_label_lower_bound", "Survival_label_upper_bound"], axis=1)
+    dtrain = DaskDMatrix(
+        client, X, label_lower_bound=y_lower_bound, label_upper_bound=y_upper_bound
+    )

     # Use train method from xgboost.dask instead of xgboost. This
     # distributed version of train returns a dictionary containing the
     # resulting booster and evaluation history obtained from
     # evaluation metrics.
-    params = {'verbosity': 1,
-              'objective': 'survival:aft',
-              'eval_metric': 'aft-nloglik',
-              'learning_rate': 0.05,
-              'aft_loss_distribution_scale': 1.20,
-              'aft_loss_distribution': 'normal',
-              'max_depth': 6,
-              'lambda': 0.01,
-              'alpha': 0.02}
-    output = xgb.dask.train(client,
-                            params,
-                            dtrain,
-                            num_boost_round=100,
-                            evals=[(dtrain, 'train')])
-    bst = output['booster']
-    history = output['history']
+    params = {
+        "verbosity": 1,
+        "objective": "survival:aft",
+        "eval_metric": "aft-nloglik",
+        "learning_rate": 0.05,
+        "aft_loss_distribution_scale": 1.20,
+        "aft_loss_distribution": "normal",
+        "max_depth": 6,
+        "lambda": 0.01,
+        "alpha": 0.02,
+    }
+    output = xgb.dask.train(
+        client, params, dtrain, num_boost_round=100, evals=[(dtrain, "train")]
+    )
+    bst = output["booster"]
+    history = output["history"]

     # you can pass output directly into `predict` too.
     prediction = xgb.dask.predict(client, bst, dtrain)
-    print('Evaluation history: ', history)
+    print("Evaluation history: ", history)

     # Uncomment the following line to save the model to the disk
     # bst.save_model('survival_model.json')
@ -62,7 +64,7 @@ def main(client):
     return prediction


-if __name__ == '__main__':
+if __name__ == "__main__":
     # or use other clusters for scaling
     with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
         with Client(cluster) as client:

@ -15,7 +15,7 @@ def main(client):
     m = 100000
     n = 100
     X = da.random.random(size=(m, n), chunks=100)
-    y = da.random.random(size=(m, ), chunks=100)
+    y = da.random.random(size=(m,), chunks=100)

     # DaskDMatrix acts like normal DMatrix, works as a proxy for local
     # DMatrix scatter around workers.
@ -25,21 +25,23 @@ def main(client):
     # distributed version of train returns a dictionary containing the
     # resulting booster and evaluation history obtained from
     # evaluation metrics.
-    output = xgb.dask.train(client,
-                            {'verbosity': 1,
-                             'tree_method': 'hist'},
-                            dtrain,
-                            num_boost_round=4, evals=[(dtrain, 'train')])
-    bst = output['booster']
-    history = output['history']
+    output = xgb.dask.train(
+        client,
+        {"verbosity": 1, "tree_method": "hist"},
+        dtrain,
+        num_boost_round=4,
+        evals=[(dtrain, "train")],
+    )
+    bst = output["booster"]
+    history = output["history"]

     # you can pass output directly into `predict` too.
     prediction = xgb.dask.predict(client, bst, dtrain)
-    print('Evaluation history:', history)
+    print("Evaluation history:", history)
     return prediction


-if __name__ == '__main__':
+if __name__ == "__main__":
     # or use other clusters for scaling
     with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
         with Client(cluster) as client:

@ -13,33 +13,38 @@ from xgboost import dask as dxgb
 from xgboost.dask import DaskDMatrix


-def using_dask_matrix(client: Client, X, y):
-    # DaskDMatrix acts like normal DMatrix, works as a proxy for local
-    # DMatrix scatter around workers.
+def using_dask_matrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
+    # DaskDMatrix acts like normal DMatrix, works as a proxy for local DMatrix scatter
+    # around workers.
     dtrain = DaskDMatrix(client, X, y)

-    # Use train method from xgboost.dask instead of xgboost. This
-    # distributed version of train returns a dictionary containing the
-    # resulting booster and evaluation history obtained from
-    # evaluation metrics.
-    output = xgb.dask.train(client,
-                            {'verbosity': 2,
+    # Use train method from xgboost.dask instead of xgboost. This distributed version
+    # of train returns a dictionary containing the resulting booster and evaluation
+    # history obtained from evaluation metrics.
+    output = xgb.dask.train(
+        client,
+        {
+            "verbosity": 2,
+            "tree_method": "hist",
             # Golden line for GPU training
-                             'tree_method': 'gpu_hist'},
-                            dtrain,
-                            num_boost_round=4, evals=[(dtrain, 'train')])
-    bst = output['booster']
-    history = output['history']
+            "device": "cuda",
+        },
+        dtrain,
+        num_boost_round=4,
+        evals=[(dtrain, "train")],
+    )
+    bst = output["booster"]
+    history = output["history"]

     # you can pass output directly into `predict` too.
     prediction = xgb.dask.predict(client, bst, dtrain)
-    print('Evaluation history:', history)
+    print("Evaluation history:", history)
     return prediction


-def using_quantile_device_dmatrix(client: Client, X, y):
-    """`DaskQuantileDMatrix` is a data type specialized for `gpu_hist` and `hist` tree
-    methods for reducing memory usage.
+def using_quantile_device_dmatrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
+    """`DaskQuantileDMatrix` is a data type specialized for `hist` tree methods for
+    reducing memory usage.

     .. versionadded:: 1.2.0

@ -52,26 +57,28 @@ def using_quantile_device_dmatrix(client: Client, X, y):
     # the `ref` argument of `DaskQuantileDMatrix`.
     dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
     output = xgb.dask.train(
-        client, {"verbosity": 2, "tree_method": "gpu_hist"}, dtrain, num_boost_round=4
+        client,
+        {"verbosity": 2, "tree_method": "hist", "device": "cuda"},
+        dtrain,
+        num_boost_round=4,
     )

     prediction = xgb.dask.predict(client, output, X)
     return prediction


-if __name__ == '__main__':
+if __name__ == "__main__":
     # `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
-    # `n_workers` represents the number of GPUs since we use one GPU per worker
-    # process.
+    # `n_workers` represents the number of GPUs since we use one GPU per worker process.
     with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
         with Client(cluster) as client:
             # generate some random data for demonstration
             m = 100000
             n = 100
             X = da.random.random(size=(m, n), chunks=10000)
-            y = da.random.random(size=(m, ), chunks=10000)
+            y = da.random.random(size=(m,), chunks=10000)

-            print('Using DaskQuantileDMatrix')
+            print("Using DaskQuantileDMatrix")
             from_ddqdm = using_quantile_device_dmatrix(client, X, y)
-            print('Using DMatrix')
+            print("Using DMatrix")
             from_dmatrix = using_dask_matrix(client, X, y)

@ -21,7 +21,8 @@ def main(client):
     y = da.random.random(m, partition_size)

     regressor = xgboost.dask.DaskXGBRegressor(verbosity=1)
-    regressor.set_params(tree_method='gpu_hist')
+    # set the device to CUDA
+    regressor.set_params(tree_method="hist", device="cuda")
     # assigning client here is optional
     regressor.client = client

@ -31,13 +32,13 @@ def main(client):
     bst = regressor.get_booster()
     history = regressor.evals_result()

-    print('Evaluation history:', history)
+    print("Evaluation history:", history)
     # returned prediction is always a dask array.
     assert isinstance(prediction, da.Array)
     return bst  # returning the trained model


-if __name__ == '__main__':
+if __name__ == "__main__":
     # With dask cuda, one can scale up XGBoost to arbitrary GPU clusters.
     # `LocalCUDACluster` used here is only for demonstration purpose.
     with LocalCUDACluster() as cluster:

@ -1,5 +0,0 @@
-# GPU Acceleration Demo
-
-`cover_type.py` shows how to train a model on the [forest cover type](https://archive.ics.uci.edu/ml/datasets/covertype) dataset using GPU acceleration. The forest cover type dataset has 581,012 rows and 54 features, making it time consuming to process. We compare the run-time and accuracy of the GPU and CPU histogram algorithms.
-
-`shap.ipynb` demonstrates using GPU acceleration to compute SHAP values for feature importance.

demo/gpu_acceleration/README.rst (new file)
@ -0,0 +1,8 @@
+:orphan:
+
+GPU Acceleration Demo
+=====================
+
+This is a collection of demonstration scripts to showcase the basic usage of GPU. Please
+see :doc:`/gpu/index` for more info. There are other demonstrations for distributed GPU
+training using dask or spark.
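For context, a minimal hedged sketch of the "basic usage of GPU" that the new README points to, assuming a CUDA device is available and using synthetic data:

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(1000, 10)
y = np.random.rand(1000)

# Select the accelerator with the `device` parameter; `hist` is the histogram
# tree method used on both CPU and GPU in recent releases.
booster = xgb.train(
    {"tree_method": "hist", "device": "cuda", "objective": "reg:squarederror"},
    xgb.DMatrix(X, label=y),
    num_boost_round=10,
)
```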
@ -1,41 +1,49 @@
+"""
+Using xgboost on GPU devices
+============================
+
+Shows how to train a model on the `forest cover type
+<https://archive.ics.uci.edu/ml/datasets/covertype>`_ dataset using GPU
+acceleration. The forest cover type dataset has 581,012 rows and 54 features, making it
+time consuming to process. We compare the run-time and accuracy of the GPU and CPU
+histogram algorithms.
+
+In addition, The demo showcases using GPU with other GPU-related libraries including
+cupy and cuml. These libraries are not strictly required.
+
+"""
 import time

+import cupy as cp
+from cuml.model_selection import train_test_split
 from sklearn.datasets import fetch_covtype
-from sklearn.model_selection import train_test_split

 import xgboost as xgb

 # Fetch dataset using sklearn
-cov = fetch_covtype()
-X = cov.data
-y = cov.target
+X, y = fetch_covtype(return_X_y=True)
+X = cp.array(X)
+y = cp.array(y)
+y -= y.min()

 # Create 0.75/0.25 train/test split
-X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, train_size=0.75,
-                                                     random_state=42)
+X_train, X_test, y_train, y_test = train_test_split(
+    X, y, test_size=0.25, train_size=0.75, random_state=42
+)

 # Specify sufficient boosting iterations to reach a minimum
 num_round = 3000

 # Leave most parameters as default
-param = {'objective': 'multi:softmax', # Specify multiclass classification
-         'num_class': 8, # Number of possible output classes
-         'tree_method': 'gpu_hist' # Use GPU accelerated algorithm
-         }
-
-# Convert input data from numpy to XGBoost format
-dtrain = xgb.DMatrix(X_train, label=y_train)
-dtest = xgb.DMatrix(X_test, label=y_test)
-
-gpu_res = {} # Store accuracy result
-tmp = time.time()
+clf = xgb.XGBClassifier(device="cuda", n_estimators=num_round)
+
 # Train model
-xgb.train(param, dtrain, num_round, evals=[(dtest, 'test')], evals_result=gpu_res)
-print("GPU Training Time: %s seconds" % (str(time.time() - tmp)))
+start = time.time()
+clf.fit(X_train, y_train, eval_set=[(X_test, y_test)])
+gpu_res = clf.evals_result()
+print("GPU Training Time: %s seconds" % (str(time.time() - start)))

 # Repeat for CPU algorithm
-tmp = time.time()
-param['tree_method'] = 'hist'
-cpu_res = {}
-xgb.train(param, dtrain, num_round, evals=[(dtest, 'test')], evals_result=cpu_res)
-print("CPU Training Time: %s seconds" % (str(time.time() - tmp)))
+clf = xgb.XGBClassifier(device="cpu", n_estimators=num_round)
+start = time.time()
+cpu_res = clf.evals_result()
+print("CPU Training Time: %s seconds" % (str(time.time() - start)))

File diff suppressed because one or more lines are too long

demo/gpu_acceleration/tree_shap.py (new file)
@ -0,0 +1,55 @@
+"""
+Use GPU to speedup SHAP value computation
+=========================================
+
+Demonstrates using GPU acceleration to compute SHAP values for feature importance.
+
+"""
+import shap
+from sklearn.datasets import fetch_california_housing
+
+import xgboost as xgb
+
+# Fetch dataset using sklearn
+data = fetch_california_housing()
+print(data.DESCR)
+X = data.data
+y = data.target
+
+num_round = 500
+
+param = {
+    "eta": 0.05,
+    "max_depth": 10,
+    "tree_method": "hist",
+    "device": "cuda",
+}
+
+# GPU accelerated training
+dtrain = xgb.DMatrix(X, label=y, feature_names=data.feature_names)
+model = xgb.train(param, dtrain, num_round)
+
+# Compute shap values using GPU with xgboost
+model.set_param({"device": "cuda"})
+shap_values = model.predict(dtrain, pred_contribs=True)
+
+# Compute shap interaction values using GPU
+shap_interaction_values = model.predict(dtrain, pred_interactions=True)
+
+
+# shap will call the GPU accelerated version as long as the device parameter is set to
+# "cuda"
+explainer = shap.TreeExplainer(model)
+shap_values = explainer.shap_values(X)
+
+# visualize the first prediction's explanation
+shap.force_plot(
+    explainer.expected_value,
+    shap_values[0, :],
+    X[0, :],
+    feature_names=data.feature_names,
+    matplotlib=True,
+)
+
+# Show a summary of feature importance
+shap.summary_plot(shap_values, X, plot_type="bar", feature_names=data.feature_names)
@ -1,9 +1,9 @@
-'''
+"""
 Demo for using and defining callback functions
 ==============================================

 .. versionadded:: 1.3.0
-'''
+"""
 import argparse
 import os
 import tempfile
@ -17,10 +17,11 @@ import xgboost as xgb


 class Plotting(xgb.callback.TrainingCallback):
-    '''Plot evaluation result during training. Only for demonstration purpose as it's quite
+    """Plot evaluation result during training. Only for demonstration purpose as it's quite
     slow to draw.

-    '''
+    """
+
     def __init__(self, rounds):
         self.fig = plt.figure()
         self.ax = self.fig.add_subplot(111)
@ -31,16 +32,16 @@ class Plotting(xgb.callback.TrainingCallback):
         plt.ion()

     def _get_key(self, data, metric):
-        return f'{data}-{metric}'
+        return f"{data}-{metric}"

     def after_iteration(self, model, epoch, evals_log):
-        '''Update the plot.'''
+        """Update the plot."""
         if not self.lines:
             for data, metric in evals_log.items():
                 for metric_name, log in metric.items():
                     key = self._get_key(data, metric_name)
                     expanded = log + [0] * (self.rounds - len(log))
-                    self.lines[key], = self.ax.plot(self.x, expanded, label=key)
+                    (self.lines[key],) = self.ax.plot(self.x, expanded, label=key)
             self.ax.legend()
         else:
             # https://pythonspot.com/matplotlib-update-plot/
@ -55,8 +56,8 @@ class Plotting(xgb.callback.TrainingCallback):


 def custom_callback():
-    '''Demo for defining a custom callback function that plots evaluation result during
-    training.'''
+    """Demo for defining a custom callback function that plots evaluation result during
+    training."""
     X, y = load_breast_cancer(return_X_y=True)
     X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)

@ -69,14 +70,16 @@ def custom_callback():
     # Pass it to the `callbacks` parameter as a list.
     xgb.train(
         {
-            'objective': 'binary:logistic',
-            'eval_metric': ['error', 'rmse'],
-            'tree_method': 'gpu_hist'
+            "objective": "binary:logistic",
+            "eval_metric": ["error", "rmse"],
+            "tree_method": "hist",
+            "device": "cuda",
         },
         D_train,
-        evals=[(D_train, 'Train'), (D_valid, 'Valid')],
+        evals=[(D_train, "Train"), (D_valid, "Valid")],
         num_boost_round=num_boost_round,
-        callbacks=[plotting])
+        callbacks=[plotting],
+    )


 def check_point_callback():
@ -89,10 +92,10 @@ def check_point_callback():
         if i == 0:
             continue
         if as_pickle:
-            path = os.path.join(tmpdir, 'model_' + str(i) + '.pkl')
+            path = os.path.join(tmpdir, "model_" + str(i) + ".pkl")
         else:
-            path = os.path.join(tmpdir, 'model_' + str(i) + '.json')
-        assert(os.path.exists(path))
+            path = os.path.join(tmpdir, "model_" + str(i) + ".json")
+        assert os.path.exists(path)

     X, y = load_breast_cancer(return_X_y=True)
     m = xgb.DMatrix(X, y)
@ -100,31 +103,36 @@ def check_point_callback():
     with tempfile.TemporaryDirectory() as tmpdir:
         # Use callback class from xgboost.callback
         # Feel free to subclass/customize it to suit your need.
-        check_point = xgb.callback.TrainingCheckPoint(directory=tmpdir,
-                                                      iterations=rounds,
-                                                      name='model')
-        xgb.train({'objective': 'binary:logistic'}, m,
-                  num_boost_round=10,
-                  verbose_eval=False,
-                  callbacks=[check_point])
+        check_point = xgb.callback.TrainingCheckPoint(
+            directory=tmpdir, iterations=rounds, name="model"
+        )
+        xgb.train(
+            {"objective": "binary:logistic"},
+            m,
+            num_boost_round=10,
+            verbose_eval=False,
+            callbacks=[check_point],
+        )
         check(False)

         # This version of checkpoint saves everything including parameters and
         # model. See: doc/tutorials/saving_model.rst
-        check_point = xgb.callback.TrainingCheckPoint(directory=tmpdir,
-                                                      iterations=rounds,
-                                                      as_pickle=True,
-                                                      name='model')
-        xgb.train({'objective': 'binary:logistic'}, m,
-                  num_boost_round=10,
-                  verbose_eval=False,
-                  callbacks=[check_point])
+        check_point = xgb.callback.TrainingCheckPoint(
+            directory=tmpdir, iterations=rounds, as_pickle=True, name="model"
+        )
+        xgb.train(
+            {"objective": "binary:logistic"},
+            m,
+            num_boost_round=10,
+            verbose_eval=False,
+            callbacks=[check_point],
+        )
         check(True)


-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument('--plot', default=1, type=int)
+    parser.add_argument("--plot", default=1, type=int)
     args = parser.parse_args()

     check_point_callback()

@ -63,7 +63,8 @@ def load_cat_in_the_dat() -> tuple[pd.DataFrame, pd.Series]:


 params = {
-    "tree_method": "gpu_hist",
+    "tree_method": "hist",
+    "device": "cuda",
     "n_estimators": 32,
     "colsample_bylevel": 0.7,
 }

@ -58,13 +58,13 @@ def main() -> None:
     # Specify `enable_categorical` to True, also we use onehot encoding based split
     # here for demonstration. For details see the document of `max_cat_to_onehot`.
     reg = xgb.XGBRegressor(
-        tree_method="gpu_hist", enable_categorical=True, max_cat_to_onehot=5
+        tree_method="hist", enable_categorical=True, max_cat_to_onehot=5, device="cuda"
     )
     reg.fit(X, y, eval_set=[(X, y)])

     # Pass in already encoded data
     X_enc, y_enc = make_categorical(100, 10, 4, True)
-    reg_enc = xgb.XGBRegressor(tree_method="gpu_hist")
+    reg_enc = xgb.XGBRegressor(tree_method="hist", device="cuda")
     reg_enc.fit(X_enc, y_enc, eval_set=[(X_enc, y_enc)])

     reg_results = np.array(reg.evals_result()["validation_0"]["rmse"])

@ -76,9 +76,7 @@ def softprob_obj(predt: np.ndarray, data: xgb.DMatrix):
             grad[r, c] = g
             hess[r, c] = h

-    # Right now (XGBoost 1.0.0), reshaping is necessary
-    grad = grad.reshape((kRows * kClasses, 1))
-    hess = hess.reshape((kRows * kClasses, 1))
+    # After 2.1.0, pass the gradient as it is.
     return grad, hess


@ -22,7 +22,10 @@ import xgboost


 def make_batches(
-    n_samples_per_batch: int, n_features: int, n_batches: int, tmpdir: str,
+    n_samples_per_batch: int,
+    n_features: int,
+    n_batches: int,
+    tmpdir: str,
 ) -> List[Tuple[str, str]]:
     files: List[Tuple[str, str]] = []
     rng = np.random.RandomState(1994)
@ -38,6 +41,7 @@ def make_batches(

 class Iterator(xgboost.DataIter):
     """A custom iterator for loading files in batches."""
+
     def __init__(self, file_paths: List[Tuple[str, str]]):
         self._file_paths = file_paths
         self._it = 0
@ -82,10 +86,11 @@ def main(tmpdir: str) -> xgboost.Booster:
     missing = np.NaN
     Xy = xgboost.DMatrix(it, missing=missing, enable_categorical=False)

-    # Other tree methods including ``hist`` and ``gpu_hist`` also work, see tutorial in
+    # ``approx`` is also supported, but less efficient due to sketching. GPU behaves
+    # differently than CPU tree methods as it uses a hybrid approach. See tutorial in
     # doc for details.
     booster = xgboost.train(
-        {"tree_method": "approx", "max_depth": 2},
+        {"tree_method": "hist", "max_depth": 4},
         Xy,
         evals=[(Xy, "Train")],
         num_boost_round=10,

@ -104,7 +104,8 @@ def ranking_demo(args: argparse.Namespace) -> None:
     qid_test = qid_test[sorted_idx]

     ranker = xgb.XGBRanker(
-        tree_method="gpu_hist",
+        tree_method="hist",
+        device="cuda",
         lambdarank_pair_method="topk",
         lambdarank_num_pair_per_sample=13,
         eval_metric=["ndcg@1", "ndcg@8"],
@ -161,7 +162,8 @@ def click_data_demo(args: argparse.Namespace) -> None:

     ranker = xgb.XGBRanker(
         n_estimators=512,
-        tree_method="gpu_hist",
+        tree_method="hist",
+        device="cuda",
         learning_rate=0.01,
         reg_lambda=1.5,
         subsample=0.8,

@ -68,22 +68,21 @@ def rmse_model(plot_result: bool, strategy: str) -> None:
 def custom_rmse_model(plot_result: bool, strategy: str) -> None:
     """Train using Python implementation of Squared Error."""

-    # As the experimental support status, custom objective doesn't support matrix as
-    # gradient and hessian, which will be changed in future release.
     def gradient(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
         """Compute the gradient squared error."""
         y = dtrain.get_label().reshape(predt.shape)
-        return (predt - y).reshape(y.size)
+        return predt - y

     def hessian(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
         """Compute the hessian for squared error."""
-        return np.ones(predt.shape).reshape(predt.size)
+        return np.ones(predt.shape)

     def squared_log(
         predt: np.ndarray, dtrain: xgb.DMatrix
     ) -> Tuple[np.ndarray, np.ndarray]:
         grad = gradient(predt, dtrain)
         hess = hessian(predt, dtrain)
+        # both numpy.ndarray and cupy.ndarray works.
         return grad, hess

     def rmse(predt: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, float]:

@ -28,17 +28,18 @@ BATCHES = 32


 class IterForDMatrixDemo(xgboost.core.DataIter):
-    '''A data iterator for XGBoost DMatrix.
+    """A data iterator for XGBoost DMatrix.

     `reset` and `next` are required for any data iterator, other functions here
     are utilites for demonstration's purpose.

-    '''
+    """
+
     def __init__(self):
-        '''Generate some random data for demostration.
+        """Generate some random data for demostration.

         Actual data can be anything that is currently supported by XGBoost.
-        '''
+        """
         self.rows = ROWS_PER_BATCH
         self.cols = COLS
         rng = cupy.random.RandomState(1994)
@ -59,27 +60,26 @@ class IterForDMatrixDemo(xgboost.core.DataIter):
         return cupy.concatenate(self._weights)

     def data(self):
-        '''Utility function for obtaining current batch of data.'''
+        """Utility function for obtaining current batch of data."""
         return self._data[self.it]

     def labels(self):
-        '''Utility function for obtaining current batch of label.'''
+        """Utility function for obtaining current batch of label."""
         return self._labels[self.it]

     def weights(self):
         return self._weights[self.it]

     def reset(self):
-        '''Reset the iterator'''
+        """Reset the iterator"""
         self.it = 0

     def next(self, input_data):
-        '''Yield next batch of data.'''
+        """Yield next batch of data."""
         if self.it == len(self._data):
             # Return 0 when there's no more batch.
             return 0
-        input_data(data=self.data(), label=self.labels(),
-                   weight=self.weights())
+        input_data(data=self.data(), label=self.labels(), weight=self.weights())
         self.it += 1
         return 1

@ -103,18 +103,19 @@ def main():

     assert m_with_it.num_col() == m.num_col()
     assert m_with_it.num_row() == m.num_row()
-    # Tree meethod must be one of the `hist` or `gpu_hist`. We use `gpu_hist` for GPU
-    # input here.
+    # Tree meethod must be `hist`.
     reg_with_it = xgboost.train(
-        {"tree_method": "gpu_hist"}, m_with_it, num_boost_round=rounds
+        {"tree_method": "hist", "device": "cuda"}, m_with_it, num_boost_round=rounds
     )
     predict_with_it = reg_with_it.predict(m_with_it)

-    reg = xgboost.train({"tree_method": "gpu_hist"}, m, num_boost_round=rounds)
+    reg = xgboost.train(
+        {"tree_method": "hist", "device": "cuda"}, m, num_boost_round=rounds
+    )
     predict = reg.predict(m)

     numpy.testing.assert_allclose(predict_with_it, predict, rtol=1e6)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

@ -7,6 +7,11 @@ Quantile Regression
 The script is inspired by this awesome example in sklearn:
 https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html

+.. note::
+
+   The feature is only supported using the Python package. In addition, quantile
+   crossing can happen due to limitation in the algorithm.
+
 """
 import argparse
 from typing import Dict

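For orientation, a minimal hedged sketch of the Python-only quantile objective the added note refers to; the data and alpha values are illustrative, and quantile crossing is not handled here.

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(1994)
X = rng.uniform(0.0, 10.0, size=(512, 1))
y = np.sin(X).ravel() + rng.normal(scale=0.2, size=512)

# `reg:quantileerror` fits several quantiles at once through `quantile_alpha`.
Xy = xgb.DMatrix(X, label=y)
booster = xgb.train(
    {"objective": "reg:quantileerror", "quantile_alpha": np.array([0.05, 0.5, 0.95])},
    Xy,
    num_boost_round=32,
)
pred = booster.predict(Xy)  # one column per requested quantile
```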
@ -24,7 +24,7 @@ def main():
     Xy = xgb.DMatrix(X_train, y_train)
     evals_result: xgb.callback.EvaluationMonitor.EvalsLog = {}
     booster = xgb.train(
-        {"tree_method": "gpu_hist", "max_depth": 6},
+        {"tree_method": "hist", "max_depth": 6, "device": "cuda"},
         Xy,
         num_boost_round=n_rounds,
         evals=[(Xy, "Train")],
@ -33,8 +33,8 @@ def main():
     SHAP = booster.predict(Xy, pred_contribs=True)

     # Refresh the leaf value and tree statistic
-    X_refresh = X[X.shape[0] // 2:]
-    y_refresh = y[y.shape[0] // 2:]
+    X_refresh = X[X.shape[0] // 2 :]
+    y_refresh = y[y.shape[0] // 2 :]
     Xy_refresh = xgb.DMatrix(X_refresh, y_refresh)
     # The model will adapt to other half of the data by changing leaf value (no change in
     # split condition) with refresh_leaf set to True.
@ -87,7 +87,7 @@ def main():
     np.testing.assert_allclose(
         np.array(prune_result["Original"]["rmse"]),
         np.array(prune_result["Train"]["rmse"]),
-        atol=1e-5
+        atol=1e-5,
     )


demo/nvflare/.gitignore (new file, vendored)
@ -0,0 +1 @@
+!config

demo/nvflare/config/config_fed_client.json (new file)
@ -0,0 +1,23 @@
+{
+  "format_version": 2,
+  "executors": [
+    {
+      "tasks": [
+        "train"
+      ],
+      "executor": {
+        "path": "trainer.XGBoostTrainer",
+        "args": {
+          "server_address": "localhost:9091",
+          "world_size": 2,
+          "server_cert_path": "server-cert.pem",
+          "client_key_path": "client-key.pem",
+          "client_cert_path": "client-cert.pem",
+          "use_gpus": false
+        }
+      }
+    }
+  ],
+  "task_result_filters": [],
+  "task_data_filters": []
+}

demo/nvflare/config/config_fed_server.json (new file)
@ -0,0 +1,22 @@
+{
+  "format_version": 2,
+  "server": {
+    "heart_beat_timeout": 600
+  },
+  "task_data_filters": [],
+  "task_result_filters": [],
+  "workflows": [
+    {
+      "id": "server_workflow",
+      "path": "controller.XGBoostController",
+      "args": {
+        "port": 9091,
+        "world_size": 2,
+        "server_key_path": "server-key.pem",
+        "server_cert_path": "server-cert.pem",
+        "client_cert_path": "client-cert.pem"
+      }
+    }
+  ],
+  "components": []
+}
@ -6,7 +6,7 @@ This directory contains a demo of Horizontal Federated Learning using
 ## Training with CPU only

 To run the demo, first build XGBoost with the federated learning plugin enabled (see the
-[README](../../plugin/federated/README.md)).
+[README](../../../plugin/federated/README.md)).

 Install NVFlare (note that currently NVFlare only supports Python 3.8):
 ```shell

@ -70,8 +70,7 @@ class XGBoostTrainer(Executor):
         param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
         if self._use_gpus:
             self.log_info(fl_ctx, f'Training with GPU {rank}')
-            param['tree_method'] = 'gpu_hist'
-            param['gpu_id'] = rank
+            param['device'] = f"cuda:{rank}"

         # Specify validations set to watch performance
         watchlist = [(dtest, 'eval'), (dtrain, 'train')]

@ -16,7 +16,7 @@ split -n l/${world_size} --numeric-suffixes=1 -a 1 ../../data/agaricus.txt.test

 nvflare poc -n 2 --prepare
 mkdir -p /tmp/nvflare/poc/admin/transfer/horizontal-xgboost
-cp -fr config custom /tmp/nvflare/poc/admin/transfer/horizontal-xgboost
+cp -fr ../config custom /tmp/nvflare/poc/admin/transfer/horizontal-xgboost
 cp server-*.pem client-cert.pem /tmp/nvflare/poc/server/
 for (( site=1; site<=world_size; site++ )); do
   cp server-cert.pem client-*.pem /tmp/nvflare/poc/site-"$site"/

@ -6,7 +6,7 @@ This directory contains a demo of Vertical Federated Learning using
 ## Training with CPU only

 To run the demo, first build XGBoost with the federated learning plugin enabled (see the
-[README](../../plugin/federated/README.md)).
+[README](../../../plugin/federated/README.md)).

 Install NVFlare (note that currently NVFlare only supports Python 3.8):
 ```shell

@@ -16,7 +16,7 @@ class SupportedTasks(object):
 
 class XGBoostTrainer(Executor):
     def __init__(self, server_address: str, world_size: int, server_cert_path: str,
-                 client_key_path: str, client_cert_path: str):
+                 client_key_path: str, client_cert_path: str, use_gpus: bool):
         """Trainer for federated XGBoost.
 
         Args:
@@ -32,6 +32,7 @@ class XGBoostTrainer(Executor):
         self._server_cert_path = server_cert_path
         self._client_key_path = client_key_path
         self._client_cert_path = client_cert_path
+        self._use_gpus = use_gpus
 
     def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext,
                 abort_signal: Signal) -> Shareable:
@@ -81,6 +82,8 @@ class XGBoostTrainer(Executor):
             'objective': 'binary:logistic',
             'eval_metric': 'auc',
         }
+        if self._use_gpus:
+            self.log_info(fl_ctx, 'GPUs are not currently supported by vertical federated XGBoost')
 
         # specify validations set to watch performance
         watchlist = [(dtest, "eval"), (dtrain, "train")]
@@ -56,7 +56,7 @@ fi
 
 nvflare poc -n 2 --prepare
 mkdir -p /tmp/nvflare/poc/admin/transfer/vertical-xgboost
-cp -fr config custom /tmp/nvflare/poc/admin/transfer/vertical-xgboost
+cp -fr ../config custom /tmp/nvflare/poc/admin/transfer/vertical-xgboost
 cp server-*.pem client-cert.pem /tmp/nvflare/poc/server/
 for (( site=1; site<=world_size; site++ )); do
   cp server-cert.pem client-*.pem /tmp/nvflare/poc/site-"${site}"/
(deleted file)
@@ -1,47 +0,0 @@
-Using XGBoost with RAPIDS Memory Manager (RMM) plugin (EXPERIMENTAL)
-====================================================================
-[RAPIDS Memory Manager (RMM)](https://github.com/rapidsai/rmm) library provides a collection of
-efficient memory allocators for NVIDIA GPUs. It is now possible to use XGBoost with memory
-allocators provided by RMM, by enabling the RMM integration plugin.
-
-The demos in this directory highlights one RMM allocator in particular: **the pool sub-allocator**.
-This allocator addresses the slow speed of `cudaMalloc()` by allocating a large chunk of memory
-upfront. Subsequent allocations will draw from the pool of already allocated memory and thus avoid
-the overhead of calling `cudaMalloc()` directly. See
-[this GTC talk slides](https://on-demand.gputechconf.com/gtc/2015/presentation/S5530-Stephen-Jones.pdf)
-for more details.
-
-Before running the demos, ensure that XGBoost is compiled with the RMM plugin enabled. To do this,
-run CMake with option `-DPLUGIN_RMM=ON` (`-DUSE_CUDA=ON` also required):
-```
-cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON
-make -j4
-```
-CMake will attempt to locate the RMM library in your build environment. You may choose to build
-RMM from the source, or install it using the Conda package manager. If CMake cannot find RMM, you
-should specify the location of RMM with the CMake prefix:
-```
-# If using Conda:
-cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
-# If using RMM installed with a custom location
-cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON -DCMAKE_PREFIX_PATH=/path/to/rmm
-```
-
-# Informing XGBoost about RMM pool
-
-When XGBoost is compiled with RMM, most of the large size allocation will go through RMM
-allocators, but some small allocations in performance critical areas are using a different
-caching allocator so that we can have better control over memory allocation behavior.
-Users can override this behavior and force the use of rmm for all allocations by setting
-the global configuration ``use_rmm``:
-
-``` python
-with xgb.config_context(use_rmm=True):
-    clf = xgb.XGBClassifier(tree_method="gpu_hist")
-```
-
-Depending on the choice of memory pool size or type of allocator, this may have negative
-performance impact.
-
-* [Using RMM with a single GPU](./rmm_singlegpu.py)
-* [Using RMM with a local Dask cluster consisting of multiple GPUs](./rmm_mgpu_with_dask.py)
51 demo/rmm_plugin/README.rst (new file)
@@ -0,0 +1,51 @@
+Using XGBoost with RAPIDS Memory Manager (RMM) plugin (EXPERIMENTAL)
+====================================================================
+
+`RAPIDS Memory Manager (RMM) <https://github.com/rapidsai/rmm>`__ library provides a
+collection of efficient memory allocators for NVIDIA GPUs. It is now possible to use
+XGBoost with memory allocators provided by RMM, by enabling the RMM integration plugin.
+
+The demos in this directory highlights one RMM allocator in particular: **the pool
+sub-allocator**. This allocator addresses the slow speed of ``cudaMalloc()`` by
+allocating a large chunk of memory upfront. Subsequent allocations will draw from the pool
+of already allocated memory and thus avoid the overhead of calling ``cudaMalloc()``
+directly. See `this GTC talk slides
+<https://on-demand.gputechconf.com/gtc/2015/presentation/S5530-Stephen-Jones.pdf>`_ for
+more details.
+
+Before running the demos, ensure that XGBoost is compiled with the RMM plugin enabled. To do this,
+run CMake with option ``-DPLUGIN_RMM=ON`` (``-DUSE_CUDA=ON`` also required):
+
+.. code-block:: sh
+
+  cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON
+  make -j$(nproc)
+
+CMake will attempt to locate the RMM library in your build environment. You may choose to build
+RMM from the source, or install it using the Conda package manager. If CMake cannot find RMM, you
+should specify the location of RMM with the CMake prefix:
+
+.. code-block:: sh
+
+  # If using Conda:
+  cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
+  # If using RMM installed with a custom location
+  cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON -DCMAKE_PREFIX_PATH=/path/to/rmm
+
+********************************
+Informing XGBoost about RMM pool
+********************************
+
+When XGBoost is compiled with RMM, most of the large size allocation will go through RMM
+allocators, but some small allocations in performance critical areas are using a different
+caching allocator so that we can have better control over memory allocation behavior.
+Users can override this behavior and force the use of rmm for all allocations by setting
+the global configuration ``use_rmm``:
+
+.. code-block:: python
+
+  with xgb.config_context(use_rmm=True):
+      clf = xgb.XGBClassifier(tree_method="hist", device="cuda")
+
+Depending on the choice of memory pool size or type of allocator, this may have negative
+performance impact.
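For completeness, a hedged Python sketch (not part of this commit) of what "informing XGBoost about the RMM pool" can look like end to end: the pool itself is created through RMM, and `use_rmm` asks XGBoost to route its remaining allocations through RMM as the README describes. The pool size below is an arbitrary example value.

```python
import rmm
import xgboost as xgb

# Create an RMM pool allocator up front (example size; tune for your GPU).
rmm.reinitialize(pool_allocator=True, initial_pool_size=2 * 1024**3)

with xgb.config_context(use_rmm=True):
    clf = xgb.XGBClassifier(tree_method="hist", device="cuda")
    # clf.fit(X, y)  # training allocations now draw from the RMM pool
```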
@@ -1,3 +1,7 @@
+"""
+Using rmm with Dask
+===================
+"""
 import dask
 from dask.distributed import Client
 from dask_cuda import LocalCUDACluster
@@ -11,25 +15,33 @@ def main(client):
     # xgb.set_config(use_rmm=True)
 
     X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
-    # In pratice one should prefer loading the data with dask collections instead of using
-    # `from_array`.
+    # In pratice one should prefer loading the data with dask collections instead of
+    # using `from_array`.
     X = dask.array.from_array(X)
     y = dask.array.from_array(y)
     dtrain = xgb.dask.DaskDMatrix(client, X, label=y)
 
-    params = {'max_depth': 8, 'eta': 0.01, 'objective': 'multi:softprob', 'num_class': 3,
-              'tree_method': 'gpu_hist', 'eval_metric': 'merror'}
-    output = xgb.dask.train(client, params, dtrain, num_boost_round=100,
-                            evals=[(dtrain, 'train')])
-    bst = output['booster']
-    history = output['history']
-    for i, e in enumerate(history['train']['merror']):
-        print(f'[{i}] train-merror: {e}')
+    params = {
+        "max_depth": 8,
+        "eta": 0.01,
+        "objective": "multi:softprob",
+        "num_class": 3,
+        "tree_method": "hist",
+        "eval_metric": "merror",
+        "device": "cuda",
+    }
+    output = xgb.dask.train(
+        client, params, dtrain, num_boost_round=100, evals=[(dtrain, "train")]
+    )
+    bst = output["booster"]
+    history = output["history"]
+    for i, e in enumerate(history["train"]["merror"]):
+        print(f"[{i}] train-merror: {e}")
 
 
-if __name__ == '__main__':
-    # To use RMM pool allocator with a GPU Dask cluster, just add rmm_pool_size option to
-    # LocalCUDACluster constructor.
-    with LocalCUDACluster(rmm_pool_size='2GB') as cluster:
+if __name__ == "__main__":
+    # To use RMM pool allocator with a GPU Dask cluster, just add rmm_pool_size option
+    # to LocalCUDACluster constructor.
+    with LocalCUDACluster(rmm_pool_size="2GB") as cluster:
         with Client(cluster) as client:
             main(client)
@@ -1,3 +1,7 @@
+"""
+Using rmm on a single node device
+=================================
+"""
 import rmm
 from sklearn.datasets import make_classification
 
@@ -16,7 +20,8 @@ params = {
     "eta": 0.01,
     "objective": "multi:softprob",
     "num_class": 3,
-    "tree_method": "gpu_hist",
+    "tree_method": "hist",
+    "device": "cuda",
 }
 # XGBoost will automatically use the RMM pool allocator
 bst = xgb.train(params, dtrain, num_boost_round=100, evals=[(dtrain, "train")])
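As a side note, a hedged sketch (not part of the commit) of the parameter migration these demo edits reflect: `tree_method='gpu_hist'` is spelled as `tree_method='hist'` plus `device='cuda'` in recent XGBoost releases. The data here is synthetic.

```python
import xgboost as xgb
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=1000, n_informative=5, n_classes=3)
dtrain = xgb.DMatrix(X, label=y)

# Deprecated spelling: {"tree_method": "gpu_hist"}
# Current spelling:
params = {"objective": "multi:softprob", "num_class": 3, "tree_method": "hist", "device": "cuda"}
bst = xgb.train(params, dtrain, num_boost_round=10)
```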
@@ -98,17 +98,24 @@ def download_wheels(
     return filenames
 
 
-def make_pysrc_wheel(release: str, outdir: str) -> None:
+def make_pysrc_wheel(
+    release: str, rc: Optional[str], rc_ver: Optional[int], outdir: str
+) -> None:
     """Make Python source distribution."""
-    dist = os.path.join(outdir, "dist")
+    dist = os.path.abspath(os.path.normpath(os.path.join(outdir, "dist")))
     if not os.path.exists(dist):
         os.mkdir(dist)
 
     with DirectoryExcursion(os.path.join(ROOT, "python-package")):
         subprocess.check_call(["python", "-m", "build", "--sdist"])
-        src = os.path.join(DIST, f"xgboost-{release}.tar.gz")
+        if rc is not None:
+            name = f"xgboost-{release}{rc}{rc_ver}.tar.gz"
+        else:
+            name = f"xgboost-{release}.tar.gz"
+        src = os.path.join(DIST, name)
         subprocess.check_call(["twine", "check", src])
-        shutil.move(src, os.path.join(dist, f"xgboost-{release}.tar.gz"))
+        target = os.path.join(dist, name)
+        shutil.move(src, target)
 
 
 def download_py_packages(
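For illustration only, a hedged sketch of the sdist naming rule the new `rc`/`rc_ver` arguments implement; the release values below are made up.

```python
from typing import Optional

def sdist_name(release: str, rc: Optional[str], rc_ver: Optional[int]) -> str:
    # Mirrors the logic above: append e.g. "rc1" for release candidates.
    if rc is not None:
        return f"xgboost-{release}{rc}{rc_ver}.tar.gz"
    return f"xgboost-{release}.tar.gz"

print(sdist_name("2.0.0", None, None))  # xgboost-2.0.0.tar.gz
print(sdist_name("2.0.0", "rc", 1))     # xgboost-2.0.0rc1.tar.gz
```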
@@ -172,7 +179,9 @@ def download_r_packages(
     hashes = []
     with DirectoryExcursion(os.path.join(outdir, "r-packages")):
         for f in filenames:
-            ret = subprocess.run(["sha256sum", os.path.basename(f)], capture_output=True)
+            ret = subprocess.run(
+                ["sha256sum", os.path.basename(f)], capture_output=True
+            )
             h = ret.stdout.decode().strip()
             hashes.append(h)
     return urls, hashes
@@ -306,7 +315,7 @@ def main(args: argparse.Namespace) -> None:
     hashes.extend(hr)
 
     # Python source wheel
-    make_pysrc_wheel(release, args.outdir)
+    make_pysrc_wheel(release, rc, rc_ver, args.outdir)
 
     # Python binary wheels
     download_py_packages(branch, major, minor, commit_hash, args.outdir)
2 doc/.gitignore (vendored)
@@ -6,3 +6,5 @@ doxygen
 parser.py
 *.pyc
 web-data
+# generated by doxygen
+tmp
@@ -1,102 +1,103 @@
# Understand your dataset with XGBoost

## Introduction

The purpose of this vignette is to show you how to use **XGBoost** to discover and understand your own dataset better.

This vignette is not about predicting anything (see [XGBoost presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)). We will explain how to use **XGBoost** to highlight the *link* between the *features* of your data and the *outcome*.

Package loading:

    require(xgboost)
    require(Matrix)
    require(data.table)
    if (!require('vcd')) {
      install.packages('vcd')
    }

> **VCD** package is used for one of its embedded dataset only.

## Preparation of the dataset

### Numeric v.s. categorical variables

**XGBoost** manages only `numeric` vectors.

What to do when you have *categorical* data?

A *categorical* variable has a fixed number of different values. For instance, if a variable called *Colour* can have only one of these three values, *red*, *blue* or *green*, then *Colour* is a *categorical* variable.

> In **R**, a *categorical* variable is called `factor`.
>
> Type `?factor` in the console for more information.

To answer the question above we will convert *categorical* variables to `numeric` ones.

### Conversion from categorical to numeric variables

#### Looking at the raw data

In this Vignette we will see how to transform a *dense* `data.frame` (*dense* = the majority of the matrix is non-zero) with *categorical* variables to a very *sparse* matrix (*sparse* = lots of zero entries in the matrix) of `numeric` features.

The method we are going to see is usually called [one-hot encoding](https://en.wikipedia.org/wiki/One-hot).

The first step is to load the `Arthritis` dataset in memory and wrap it with the `data.table` package.

    data(Arthritis)
    df <- data.table(Arthritis, keep.rownames = FALSE)

> `data.table` is 100% compliant with **R** `data.frame` but its syntax is more consistent and its performance for large dataset is [best in class](https://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly) (`dplyr` from **R** and `Pandas` from **Python** [included](https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping)). Some parts of **XGBoost's** **R** package use `data.table`.

The first thing we want to do is to have a look to the first few lines of the `data.table`:

    head(df)

    ##    ID Treatment  Sex Age Improved
    ## 1: 57   Treated Male  27     Some
    ## 2: 46   Treated Male  29     None
    ## 3: 77   Treated Male  30     None
    ## 4: 17   Treated Male  32   Marked
    ## 5: 36   Treated Male  46   Marked
    ## 6: 23   Treated Male  58   Marked

Now we will check the format of each column.

    str(df)

    ## Classes 'data.table' and 'data.frame':   84 obs. of  5 variables:
    ##  $ ID       : int  57 46 77 17 36 23 75 39 33 55 ...
    ##  $ Treatment: Factor w/ 2 levels "Placebo","Treated": 2 2 2 2 2 2 2 2 2 2 ...
    ##  $ Sex      : Factor w/ 2 levels "Female","Male": 2 2 2 2 2 2 2 2 2 2 ...
    ##  $ Age      : int  27 29 30 32 46 58 59 59 63 63 ...
    ##  $ Improved : Ord.factor w/ 3 levels "None"<"Some"<..: 2 1 1 3 3 3 1 3 1 1 ...
    ##  - attr(*, ".internal.selfref")=<externalptr>

2 columns have `factor` type, one has `ordinal` type.

> `ordinal` variable :
>
> - can take a limited number of values (like `factor`) ;
> - these values are ordered (unlike `factor`). Here these ordered values are: `Marked > Some > None`

#### Creation of new features based on old ones
@@ -104,368 +105,371 @@ We will add some new *categorical* features to see if it helps.

##### Grouping per 10 years

For the first features we create groups of age by rounding the real age.

Note that we transform it to `factor` so the algorithm treats these age groups as independent values.

Therefore, 20 is not closer to 30 than 60. In other words, the distance between ages is lost in this transformation.

    head(df[, AgeDiscret := as.factor(round(Age / 10, 0))])

    ##    ID Treatment  Sex Age Improved AgeDiscret
    ## 1: 57   Treated Male  27     Some          3
    ## 2: 46   Treated Male  29     None          3
    ## 3: 77   Treated Male  30     None          3
    ## 4: 17   Treated Male  32   Marked          3
    ## 5: 36   Treated Male  46   Marked          5
    ## 6: 23   Treated Male  58   Marked          6

##### Randomly split into two groups

The following is an even stronger simplification of the real age with an arbitrary split at 30 years old. I choose this value **based on nothing**. We will see later if simplifying the information based on arbitrary values is a good strategy (you may already have an idea of how well it will work…).

    head(df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))])

    ##    ID Treatment  Sex Age Improved AgeDiscret AgeCat
    ## 1: 57   Treated Male  27     Some          3  Young
    ## 2: 46   Treated Male  29     None          3  Young
    ## 3: 77   Treated Male  30     None          3  Young
    ## 4: 17   Treated Male  32   Marked          3     Old
    ## 5: 36   Treated Male  46   Marked          5     Old
    ## 6: 23   Treated Male  58   Marked          6     Old

##### Risks in adding correlated features

These new features are highly correlated to the `Age` feature because they are simple transformations of this feature.

For many machine learning algorithms, using correlated features is not a good idea. It may sometimes make prediction less accurate, and most of the time make interpretation of the model almost impossible. GLM, for instance, assumes that the features are uncorrelated.

Fortunately, decision tree algorithms (including boosted trees) are very robust to these features. Therefore we don't have to do anything to manage this situation.

##### Cleaning data

We remove ID as there is nothing to learn from this feature (it would just add some noise).

    df[, ID := NULL]

We will list the different values for the column `Treatment`:

    levels(df[, Treatment])

    ## [1] "Placebo" "Treated"

#### Encoding categorical features

Next step, we will transform the categorical data to dummy variables. Several encoding methods exist, e.g., [one-hot encoding](https://en.wikipedia.org/wiki/One-hot) is a common approach. We will use the [dummy contrast coding](https://stats.oarc.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/) which is popular because it produces "full rank" encoding (also see [this blog post by Max Kuhn](http://appliedpredictivemodeling.com/blog/2013/10/23/the-basics-of-encoding-categorical-data-for-predictive-models)).

The purpose is to transform each value of each *categorical* feature into a *binary* feature `{0, 1}`.

For example, the column `Treatment` will be replaced by two columns, `TreatmentPlacebo`, and `TreatmentTreated`. Each of them will be *binary*. Therefore, an observation which has the value `Placebo` in column `Treatment` before the transformation will have the value `1` in the new column `TreatmentPlacebo` and the value `0` in the new column `TreatmentTreated` after the transformation. The column `TreatmentPlacebo` will disappear during the contrast encoding, as it would be absorbed into a common constant intercept column.

Column `Improved` is excluded because it will be our `label` column, the one we want to predict.

-    sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df)
+    sparse_matrix <- sparse.model.matrix(Improved ~ ., data = df)[, -1]
     head(sparse_matrix)

-    ## 6 x 10 sparse Matrix of class "dgCMatrix"
-    ## 1 . 1 1 27 1 . . . . 1
-    ## 2 . 1 1 29 1 . . . . 1
-    ## 3 . 1 1 30 1 . . . . 1
-    ## 4 . 1 1 32 1 . . . . .
-    ## 5 . 1 1 46 . . 1 . . .
-    ## 6 . 1 1 58 . . . 1 . .
+    ## 6 x 9 sparse Matrix of class "dgCMatrix"
+    ##   TreatmentTreated SexMale Age AgeDiscret3 AgeDiscret4 AgeDiscret5 AgeDiscret6
+    ## 1                1       1  27           1           .           .           .
+    ## 2                1       1  29           1           .           .           .
+    ## 3                1       1  30           1           .           .           .
+    ## 4                1       1  32           1           .           .           .
+    ## 5                1       1  46           .           .           1           .
+    ## 6                1       1  58           .           .           .           1
+    ##   AgeDiscret7 AgeCatYoung
+    ## 1           .           1
+    ## 2           .           1
+    ## 3           .           1
+    ## 4           .           .
+    ## 5           .           .
+    ## 6           .           .

> Formula `Improved ~ .` used above means transform all *categorical* features but column `Improved` to binary values. The `-1` column selection removes the intercept column which is full of `1` (this column is generated by the conversion). For more information, you can type `?sparse.model.matrix` in the console.
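As a cross-reference for Python users (not part of the vignette), the same kind of full-rank dummy encoding can be sketched with pandas; `drop_first=True` plays the role of dropping the reference level, and the toy column values are hypothetical.

```python
import pandas as pd

df = pd.DataFrame({
    "Treatment": ["Placebo", "Treated", "Treated"],
    "Sex": ["Male", "Female", "Male"],
    "Age": [27, 29, 30],
})
# One binary column per remaining level; the first level of each factor is dropped,
# mirroring the full-rank contrast coding used in the vignette.
encoded = pd.get_dummies(df, columns=["Treatment", "Sex"], drop_first=True)
print(encoded)
```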
Create the output `numeric` vector (not as a sparse `Matrix`):

    output_vector <- df[, Improved] == "Marked"

1.  set `Y` vector to `0`;
2.  set `Y` to `1` for rows where `Improved == Marked` is `TRUE` ;
3.  return `Y` vector.

## Build the model

The code below is very usual. For more information, you can look at the documentation of `xgboost` function (or at the vignette [XGBoost presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)).

    bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 4,
                   eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")

    ## [1]  train-logloss:0.485466
    ## [2]  train-logloss:0.438534
    ## [3]  train-logloss:0.412250
    ## [4]  train-logloss:0.395828
    ## [5]  train-logloss:0.384264
    ## [6]  train-logloss:0.374028
    ## [7]  train-logloss:0.365005
    ## [8]  train-logloss:0.351233
    ## [9]  train-logloss:0.341678
    ## [10] train-logloss:0.334465

You can see some `train-logloss: 0.XXXXX` lines followed by a number. It decreases. Each line shows how well the model explains the data. Lower is better.

A small value for training error may be a symptom of [overfitting](https://en.wikipedia.org/wiki/Overfitting), meaning the model will not accurately predict unseen values.

-> Here you can see the numbers decrease until line 7 and then increase.
->
-> It probably means we are overfitting. To fix that I should reduce the number of rounds to `nrounds = 4`. I will let things like that because I don't really care for the purpose of this example :-)

## Feature importance

## Measure feature importance

### Build the feature importance data.table

Remember, each binary column corresponds to a single value of one of *categorical* features.

    importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
    head(importance)

    ##             Feature        Gain      Cover  Frequency
    ## 1:              Age 0.622031769 0.67251696 0.67241379
    ## 2: TreatmentTreated 0.285750540 0.11916651 0.10344828
    ## 3:          SexMale 0.048744022 0.04522028 0.08620690
    ## 4:      AgeDiscret6 0.016604639 0.04784639 0.05172414
    ## 5:      AgeDiscret3 0.016373781 0.08028951 0.05172414
    ## 6:      AgeDiscret4 0.009270557 0.02858801 0.01724138

> The column `Gain` provides the information we are looking for.
>
> As you can see, features are classified by `Gain`.

`Gain` is the improvement in accuracy brought by a feature to the branches it is on. The idea is that before adding a new split on a feature X to the branch there were some wrongly classified elements; after adding the split on this feature, there are two new branches, and each of these branches is more accurate (one branch saying if your observation is on this branch then it should be classified as `1`, and the other branch saying the exact opposite).

`Cover` is related to the second order derivative (or Hessian) of the loss function with respect to a particular variable; thus, a large value indicates a variable has a large potential impact on the loss function and so is important.

`Frequency` is a simpler way to measure the `Gain`. It just counts the number of times a feature is used in all generated trees. You should not use it (unless you know why you want to use it).
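The vignette is R, but for readers of the Python package a roughly equivalent way to query the same importance metrics is sketched below (synthetic data; nothing here comes from the vignette itself).

```python
import xgboost as xgb
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=200, n_features=8, random_state=0)
bst = xgb.train({"objective": "binary:logistic", "max_depth": 4},
                xgb.DMatrix(X, label=y), num_boost_round=10)

# Same notions as in the vignette: gain, cover, and weight (~ frequency).
for imp_type in ("gain", "cover", "weight"):
    print(imp_type, bst.get_score(importance_type=imp_type))
```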
-#### Improvement in the interpretability of feature importance data.table
-
-We can go deeper in the analysis of the model. In the `data.table` above, we have discovered which features counts to predict if the illness will go or not. But we don't yet know the role of these features. For instance, one of the question we may want to answer would be: does receiving a placebo treatment helps to recover from the illness?
-
-One simple solution is to count the co-occurrences of a feature and a class of the classification.
-
-For that purpose we will execute the same function as above but using two more parameters, `data` and `label`.
-
-    importanceRaw <- xgb.importance(feature_names = sparse_matrix@Dimnames[[2]], model = bst, data = sparse_matrix, label = output_vector)
-
-    # Cleaning for better display
-    importanceClean <- importanceRaw[,`:=`(Cover=NULL, Frequency=NULL)]
-
-    head(importanceClean)
-
-    ##             Feature        Split       Gain RealCover RealCover %
-    ## 1: TreatmentPlacebo -1.00136e-05 0.28575061         7   0.2500000
-    ## 2:              Age         61.5 0.16374034        12   0.4285714
-    ## 3:              Age           39 0.08705750          8   0.2857143
-    ## 4:              Age         57.5 0.06947553         11   0.3928571
-    ## 5:          SexMale -1.00136e-05 0.04874405          4   0.1428571
-    ## 6:              Age         53.5 0.04620627         10   0.3571429
-
-> In the table above we have removed two not needed columns and select only the first lines.
-
-First thing you notice is the new column `Split`. It is the split applied to the feature on a branch of one of the tree. Each split is present, therefore a feature can appear several times in this table. Here we can see the feature `Age` is used several times with different splits.
-
-How the split is applied to count the co-occurrences? It is always `<`. For instance, in the second line, we measure the number of persons under 61.5 years with the illness gone after the treatment.
-
-The two other new columns are `RealCover` and `RealCover %`. In the first column it measures the number of observations in the dataset where the split is respected and the label marked as `1`. The second column is the percentage of the whole population that `RealCover` represents.
-
-Therefore, according to our findings, getting a placebo doesn't seem to help but being younger than 61 years may help (seems logic).
-
-> You may wonder how to interpret the `< 1.00001` on the first line. Basically, in a sparse `Matrix`, there is no `0`, therefore, looking for one hot-encoded categorical observations validating the rule `< 1.00001` is like just looking for `1` for this feature.

### Plotting the feature importance

All these things are nice, but it would be even better to plot the results.

    xgb.plot.importance(importance_matrix = importance)

<img src="discoverYourData_files/figure-markdown_strict/unnamed-chunk-12-1.png" style="display: block; margin: auto;" />

Running this line of code, you should get a bar chart showing the importance of the 6 features (containing the same data as the output we saw earlier, but displaying it visually for easier consumption). Note that `xgb.ggplot.importance` is also available for all the ggplot2 fans!

> Depending of the dataset and the learning parameters you may have more than two clusters. Default value is to limit them to `10`, but you can increase this limit. Look at the function documentation for more information.

-Feature have automatically been divided in 2 clusters: the interesting features... and the others.

According to the plot above, the most important features in this dataset to predict if the treatment will work are :

- An individual's age;
- Having received a placebo or not;
- Gender;
- Our generated feature AgeDiscret. We can see that its contribution is very low.

### Do these results make sense?

Let's check some **Chi2** between each of these features and the label.

Higher **Chi2** means better correlation.

    c2 <- chisq.test(df$Age, output_vector)
    print(c2)

    ##
    ##  Pearson's Chi-squared test
    ##
    ## data:  df$Age and output_vector
    ## X-squared = 35.475, df = 35, p-value = 0.4458

The Pearson correlation between Age and illness disappearing is **35.47**.

    c2 <- chisq.test(df$AgeDiscret, output_vector)
    print(c2)

    ##
    ##  Pearson's Chi-squared test
    ##
    ## data:  df$AgeDiscret and output_vector
    ## X-squared = 8.2554, df = 5, p-value = 0.1427

Our first simplification of Age gives a Pearson correlation of **8.26**.

    c2 <- chisq.test(df$AgeCat, output_vector)
    print(c2)

    ##
    ##  Pearson's Chi-squared test with Yates' continuity correction
    ##
    ## data:  df$AgeCat and output_vector
    ## X-squared = 2.3571, df = 1, p-value = 0.1247

The perfectly random split we did between young and old at 30 years old has a low correlation of **2.36**. This suggests that, for the particular illness we are studying, the age at which someone is vulnerable to this disease is likely very different from 30.

Moral of the story: don't let your *gut* lower the quality of your model.

In *data science*, there is the word *science* :-)
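As an aside, a hedged Python counterpart of the chi-squared sanity check done above (synthetic stand-ins for the Arthritis columns; scipy's `chi2_contingency` plays the role of R's `chisq.test`):

```python
import pandas as pd
from scipy.stats import chi2_contingency

# Hypothetical stand-ins for the vignette's AgeCat feature and Marked label.
df = pd.DataFrame({
    "AgeCat": ["Young", "Old", "Old", "Young", "Old", "Young"] * 10,
    "Marked": [1, 0, 1, 0, 1, 0] * 10,
})
stat, p, dof, _ = chi2_contingency(pd.crosstab(df["AgeCat"], df["Marked"]))
print(f"X-squared = {stat:.3f}, df = {dof}, p-value = {p:.4f}")
```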
The perfectly random split I did between young and old at 30 years old have a low correlation of **2.36**. It's a result we may expect as may be in my mind > 30 years is being old (I am 32 and starting feeling old, this may explain that), but for the illness we are studying, the age to be vulnerable is not the same.
|
## Conclusion
|
||||||
|
|
||||||
Morality: don't let your *gut* lower the quality of your model.
|
As you can see, in general *destroying information by simplifying it
|
||||||
|
won’t improve your model*. **Chi2** just demonstrates that.
|
||||||
|
|
||||||
In *data science* expression, there is the word *science* :-)
|
But in more complex cases, creating a new feature from an existing one
|
||||||
|
may help the algorithm and improve the model.
|
||||||
|
|
||||||
Conclusion
|
+The case studied here is not complex enough to show that. Check [Kaggle
|
||||||
----------
|
website](https://www.kaggle.com/) for some challenging datasets.
|
||||||
|
|
||||||
As you can see, in general *destroying information by simplifying it won't improve your model*. **Chi2** just demonstrates that.
|
Moreover, you can see that even if we have added some new features which
|
||||||
|
are not very useful/highly correlated with other features, the boosting
|
||||||
|
tree algorithm was still able to choose the best one (which in this case
|
||||||
|
is the Age).
|
||||||
|
|
||||||
But in more complex cases, creating a new feature based on existing one which makes link with the outcome more obvious may help the algorithm and improve the model.
|
Linear models may not perform as well.
|
||||||
|
|
||||||
The case studied here is not enough complex to show that. Check [Kaggle website](http://www.kaggle.com/) for some challenging datasets. However it's almost always worse when you add some arbitrary rules.
|
## Special Note: What about Random Forests™?
|
||||||
|
|
||||||
Moreover, you can notice that even if we have added some not useful new features highly correlated with other features, the boosting tree algorithm have been able to choose the best one, which in this case is the Age.
|
As you may know, the [Random
|
||||||
|
Forests](https://en.wikipedia.org/wiki/Random_forest) algorithm is
|
||||||
|
cousin with boosting and both are part of the [ensemble
|
||||||
|
learning](https://en.wikipedia.org/wiki/Ensemble_learning) family.
|
||||||
|
|
||||||
Linear models may not be that smart in this scenario.
|
Both train several decision trees for one dataset. The *main* difference
|
||||||
|
is that in Random Forests, trees are independent and in boosting, the
|
||||||
|
`N+1`-st tree focuses its learning on the loss (<=> what has not
|
||||||
|
been well modeled by the tree `N`).
|
||||||
|
|
||||||
Special Note: What about Random Forests™?
|
This difference can have an impact on a edge case in feature importance
|
||||||
-----------------------------------------
|
analysis: *correlated features*.
|
||||||
|
|
||||||
As you may know, [Random Forests](http://en.wikipedia.org/wiki/Random_forest) algorithm is cousin with boosting and both are part of the [ensemble learning](http://en.wikipedia.org/wiki/Ensemble_learning) family.
|
Imagine two features perfectly correlated, feature `A` and feature `B`.
|
||||||
|
For one specific tree, if the algorithm needs one of them, it will
|
||||||
|
choose randomly (true in both boosting and Random Forests).
|
||||||
|
|
||||||
Both train several decision trees for one dataset. The *main* difference is that in Random Forests, trees are independent and in boosting, the tree `N+1` focus its learning on the loss (<=> what has not been well modeled by the tree `N`).
|
However, in Random Forests this random choice will be done for each
|
||||||
|
tree, because each tree is independent from the others. Therefore,
|
||||||
|
approximately (and depending on your parameters) 50% of the trees will
|
||||||
|
choose feature `A` and the other 50% will choose feature `B`. So the
|
||||||
|
*importance* of the information contained in `A` and `B` (which is the
|
||||||
|
same, because they are perfectly correlated) is diluted in `A` and `B`.
|
||||||
|
So you won’t easily know this information is important to predict what
|
||||||
|
you want to predict! It is even worse when you have 10 correlated
|
||||||
|
features…
|
||||||
|
|
||||||
This difference have an impact on a corner case in feature importance analysis: the *correlated features*.
|
In boosting, when a specific link between feature and outcome have been
|
||||||
|
learned by the algorithm, it will try to not refocus on it (in theory it
|
||||||
|
is what happens, reality is not always that simple). Therefore, all the
|
||||||
|
importance will be on feature `A` or on feature `B` (but not both). You
|
||||||
|
will know that one feature has an important role in the link between the
|
||||||
|
observations and the label. It is still up to you to search for the
|
||||||
|
correlated features to the one detected as important if you need to know
|
||||||
|
all of them.
|
||||||
|
|
||||||
Imagine two features perfectly correlated, feature `A` and feature `B`. For one specific tree, if the algorithm needs one of them, it will choose randomly (true in both boosting and Random Forests).

However, in Random Forests this random choice will be done for each tree, because each tree is independent from the others. Therefore, approximately, depending on your parameters, 50% of the trees will choose feature `A` and the other 50% will choose feature `B`. So the *importance* of the information contained in `A` and `B` (which is the same, because they are perfectly correlated) is diluted in `A` and `B`. So you won't easily know this information is important to predict what you want to predict! It is even worse when you have 10 correlated features...

In boosting, when a specific link between feature and outcome has been learned by the algorithm, it will try not to refocus on it (in theory that is what happens; reality is not always that simple). Therefore, all the importance will be on feature `A` or on feature `B` (but not both). You will know that one feature has an important role in the link between the observations and the label. It is still up to you to search for the correlated features to the one detected as important if you need to know all of them.

If you want to try the Random Forests algorithm, you can tweak XGBoost parameters!

**Warning**: this is still an experimental parameter.

For instance, to compute a model with 1000 trees, with a 0.5 factor on sampling rows and columns:

```r
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test

#Random Forest - 1000 trees
bst <- xgboost(
    data = train$data
  , label = train$label
  , max_depth = 4
  , num_parallel_tree = 1000
  , subsample = 0.5
  , colsample_bytree = 0.5
  , nrounds = 1
  , objective = "binary:logistic"
)
```

```
## [1] train-logloss:0.456201
```

```r
#Boosting - 3 rounds
bst <- xgboost(
    data = train$data
  , label = train$label
  , max_depth = 4
  , nrounds = 3
  , objective = "binary:logistic"
)
```

```
## [1] train-logloss:0.444882
## [2] train-logloss:0.302428
## [3] train-logloss:0.212847
```

> Note that the parameter `round` is set to `1`.

> [**Random Forests**](https://www.stat.berkeley.edu/~breiman/RandomForests/cc_papers.htm) is a trademark of Leo Breiman and Adele Cutler and is licensed exclusively to Salford Systems for the commercial release of the software.
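
For readers following the same comparison from the Python package rather than R, here is a rough equivalent of the two configurations above. This is only a hedged sketch on synthetic data (the R `agaricus` demo matrices are not assumed to be available from Python); parameter values mirror the R chunks.

```python
import numpy as np
import xgboost as xgb

# Synthetic binary-classification data standing in for the agaricus demo.
X = np.random.rand(500, 10)
y = np.random.randint(0, 2, size=500)
dtrain = xgb.DMatrix(X, label=y)

# "Random Forest" style: 1000 parallel trees, one boosting round,
# 0.5 row and column subsampling.
rf_params = {
    "max_depth": 4,
    "num_parallel_tree": 1000,
    "subsample": 0.5,
    "colsample_bytree": 0.5,
    "objective": "binary:logistic",
}
rf = xgb.train(rf_params, dtrain, num_boost_round=1)

# Plain boosting: 3 sequential rounds.
boosted = xgb.train({"max_depth": 4, "objective": "binary:logistic"},
                    dtrain, num_boost_round=3)
```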
@@ -119,7 +119,7 @@ An up-to-date version of the CUDA toolkit is required.

 .. note:: Checking your compiler version

-  CUDA is really picky about supported compilers, a table for the compatible compilers for the latests CUDA version on Linux can be seen `here <https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html>`_.
+  CUDA is really picky about supported compilers, a table for the compatible compilers for the latest CUDA version on Linux can be seen `here <https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html>`_.

   Some distros package a compatible ``gcc`` version with CUDA. If you run into compiler errors with ``nvcc``, try specifying the correct compiler with ``-DCMAKE_CXX_COMPILER=/path/to/correct/g++ -DCMAKE_C_COMPILER=/path/to/correct/gcc``. On Arch Linux, for example, both binaries can be found under ``/opt/cuda/bin/``.
@@ -259,7 +259,7 @@ There are several ways to build and install the package from source:

   import sys
   import pathlib
-  libpath = pathlib.Path(sys.prefix).joinpath("lib", "libxgboost.so")
+  libpath = pathlib.Path(sys.base_prefix).joinpath("lib", "libxgboost.so")
   assert libpath.exists()

 Then pass ``use_system_libxgboost=True`` option to ``pip install``:

@@ -33,6 +33,8 @@ DMatrix

 .. doxygengroup:: DMatrix
    :project: xgboost

+.. _c_streaming:
+
 Streaming
 ---------

doc/conf.py

@@ -19,7 +19,6 @@ import sys
 import tarfile
 import urllib.request
 import warnings
-from subprocess import call
 from urllib.error import HTTPError

 from sh.contrib import git

@@ -148,12 +147,20 @@ extensions = [

 sphinx_gallery_conf = {
     # path to your example scripts
-    "examples_dirs": ["../demo/guide-python", "../demo/dask", "../demo/aft_survival"],
+    "examples_dirs": [
+        "../demo/guide-python",
+        "../demo/dask",
+        "../demo/aft_survival",
+        "../demo/gpu_acceleration",
+        "../demo/rmm_plugin"
+    ],
     # path to where to save gallery generated output
     "gallery_dirs": [
         "python/examples",
         "python/dask-examples",
         "python/survival-examples",
+        "python/gpu-examples",
+        "python/rmm-examples",
     ],
     "matplotlib_animations": True,
 }
@@ -32,7 +32,7 @@ GitHub Actions is also used to build Python wheels targeting MacOS Intel and App

 ``python_wheels`` pipeline sets up environment variables prefixed ``CIBW_*`` to indicate the target
 OS and processor. The pipeline then invokes the script ``build_python_wheels.sh``, which in turn
 calls ``cibuildwheel`` to build the wheel. The ``cibuildwheel`` is a library that sets up a
-suitable Python environment for each OS and processor target. Since we don't have Apple Silion
+suitable Python environment for each OS and processor target. Since we don't have an Apple Silicon
 machine in GitHub Actions, cross-compilation is needed; ``cibuildwheel`` takes care of the complex
 task of cross-compiling a Python wheel. (Note that ``cibuildwheel`` will call
 ``pip wheel``. Since XGBoost has a native library component, we created a customized build

@@ -131,7 +131,7 @@ set up a credential pair in order to provision resources on AWS. See

 Worker Image Pipeline
 =====================
 Building images for worker machines used to be a chore: you'd provision an EC2 machine, SSH into it, and
-manually install the necessary packages. This process is not only laborous but also error-prone. You may
+manually install the necessary packages. This process is not only laborious but also error-prone. You may
 forget to install a package or change a system configuration.

 No more. Now we have an automated pipeline for building images for worker machines.
@@ -100,7 +100,7 @@ two automatic checks to enforce coding style conventions. To expedite the code r

 Linter
 ======
-We use `pylint <https://github.com/PyCQA/pylint>`_ and `cpplint <https://github.com/cpplint/cpplint>`_ to enforce style convention and find potential errors. Linting is especially useful for Python, as we can catch many errors that would have otherwise occured at run-time.
+We use `pylint <https://github.com/PyCQA/pylint>`_ and `cpplint <https://github.com/cpplint/cpplint>`_ to enforce style convention and find potential errors. Linting is especially useful for Python, as we can catch many errors that would have otherwise occurred at run-time.

 To run this check locally, run the following command from the top level source tree:
@@ -29,7 +29,7 @@ The Project Management Committee (PMC) of the XGBoost project appointed `Open So

 All expenses incurred for hosting CI will be submitted to the fiscal host with receipts. Only the expenses in the following categories will be approved for reimbursement:

-* Cloud exprenses for the cloud test farm (https://buildkite.com/xgboost)
+* Cloud expenses for the cloud test farm (https://buildkite.com/xgboost)
 * Cost of domain https://xgboost-ci.net
 * Monthly cost of using BuildKite
 * Hosting cost of the User Forum (https://discuss.xgboost.ai)
@@ -169,7 +169,7 @@ supply a specified SANITIZER_PATH.

 How to use sanitizers with CUDA support
 =======================================
-Runing XGBoost on CUDA with address sanitizer (asan) will raise memory error.
+Running XGBoost on CUDA with address sanitizer (asan) will raise memory error.
 To use asan with CUDA correctly, you need to configure asan via ASAN_OPTIONS
 environment variable:
@@ -63,7 +63,7 @@ XGBoost supports missing values by default.
 In tree algorithms, branch directions for missing values are learned during training.
 Note that the gblinear booster treats missing values as zeros.

-When the ``missing`` parameter is specifed, values in the input predictor that is equal to
+When the ``missing`` parameter is specified, values in the input predictor that is equal to
 ``missing`` will be treated as missing and removed. By default it's set to ``NaN``.
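
As a small illustration of the ``missing`` parameter described in this hunk, here is a minimal hypothetical sketch; the sentinel ``-999.0`` and the toy data are made up for the example.

```python
import numpy as np
import xgboost as xgb

# Data where -999.0 marks unknown entries.
X = np.array([[1.0, -999.0], [2.0, 3.0], [-999.0, 4.0]])
y = np.array([0, 1, 1])

# Entries equal to `missing` are treated as missing; branch directions
# for them are learned during training.
dtrain = xgb.DMatrix(X, label=y, missing=-999.0)
booster = xgb.train({"objective": "binary:logistic"}, dtrain, num_boost_round=2)
```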

 **************************************
@@ -14,53 +14,46 @@ Most of the algorithms in XGBoost including training, prediction and evaluation

 Usage
 =====
-Specify the ``tree_method`` parameter as ``gpu_hist``. For details around the ``tree_method`` parameter, see :doc:`tree method </treemethod>`.
-
-Supported parameters
---------------------
-
-GPU accelerated prediction is enabled by default for the above mentioned ``tree_method`` parameters but can be switched to CPU prediction by setting ``predictor`` to ``cpu_predictor``. This could be useful if you want to conserve GPU memory. Likewise when using CPU algorithms, GPU accelerated prediction can be enabled by setting ``predictor`` to ``gpu_predictor``.
-
-The device ordinal (which GPU to use if you have many of them) can be selected using the
-``gpu_id`` parameter, which defaults to 0 (the first device reported by CUDA runtime).
+To enable GPU acceleration, specify the ``device`` parameter as ``cuda``. In addition, the device ordinal (which GPU to use if you have multiple devices in the same node) can be specified using the ``cuda:<ordinal>`` syntax, where ``<ordinal>`` is an integer that represents the device ordinal. XGBoost defaults to 0 (the first device reported by CUDA runtime).

 The GPU algorithms currently work with CLI, Python, R, and JVM packages. See :doc:`/install` for details.

 .. code-block:: python
    :caption: Python example

-   param['gpu_id'] = 0
-   param['tree_method'] = 'gpu_hist'
+   params = dict()
+   params["device"] = "cuda"
+   params["tree_method"] = "hist"
+   Xy = xgboost.QuantileDMatrix(X, y)
+   xgboost.train(params, Xy)

 .. code-block:: python
-   :caption: With Scikit-Learn interface
+   :caption: With the Scikit-Learn interface

-   XGBRegressor(tree_method='gpu_hist', gpu_id=0)
+   XGBRegressor(tree_method="hist", device="cuda")

 GPU-Accelerated SHAP values
 =============================
-XGBoost makes use of `GPUTreeShap <https://github.com/rapidsai/gputreeshap>`_ as a backend for computing shap values when the GPU predictor is selected.
+XGBoost makes use of `GPUTreeShap <https://github.com/rapidsai/gputreeshap>`_ as a backend for computing shap values when the GPU is used.

 .. code-block:: python

-   model.set_param({"predictor": "gpu_predictor"})
-   shap_values = model.predict(dtrain, pred_contribs=True)
+   booster.set_param({"device": "cuda:0"})
+   shap_values = booster.predict(dtrain, pred_contribs=True)
    shap_interaction_values = model.predict(dtrain, pred_interactions=True)

-See examples `here
-<https://github.com/dmlc/xgboost/tree/master/demo/gpu_acceleration>`__.
+See :ref:`sphx_glr_python_gpu-examples_tree_shap.py` for a worked example.

 Multi-node Multi-GPU Training
 =============================

-XGBoost supports fully distributed GPU training using `Dask <https://dask.org/>`_, ``Spark`` and ``PySpark``. For getting started with Dask see our tutorial :doc:`/tutorials/dask` and worked examples `here <https://github.com/dmlc/xgboost/tree/master/demo/dask>`__, also Python documentation :ref:`dask_api` for complete reference. For usage with ``Spark`` using Scala see :doc:`/jvm/xgboost4j_spark_gpu_tutorial`. Lastly for distributed GPU training with ``PySpark``, see :doc:`/tutorials/spark_estimator`.
+XGBoost supports fully distributed GPU training using `Dask <https://dask.org/>`_, ``Spark`` and ``PySpark``. For getting started with Dask see our tutorial :doc:`/tutorials/dask` and worked examples :doc:`/python/dask-examples/index`, also Python documentation :ref:`dask_api` for complete reference. For usage with ``Spark`` using Scala see :doc:`/jvm/xgboost4j_spark_gpu_tutorial`. Lastly for distributed GPU training with ``PySpark``, see :doc:`/tutorials/spark_estimator`.

 Memory usage
 ============
-The following are some guidelines on the device memory usage of the `gpu_hist` tree method.
+The following are some guidelines on the device memory usage of the ``hist`` tree method on GPU.

 Memory inside xgboost training is generally allocated for two reasons - storing the dataset and working memory.
@@ -73,12 +66,13 @@ If you are getting out-of-memory errors on a big dataset, try the or :py:class:

 CPU-GPU Interoperability
 ========================
-XGBoost models trained on GPUs can be used on CPU-only systems to generate predictions. For information about how to save and load an XGBoost model, see :doc:`/tutorials/saving_model`.
+The model can be used on any device regardless of the one used to train it. For instance, a model trained using GPU can still work on a CPU-only machine and vice versa. For more information about model serialization, see :doc:`/tutorials/saving_model`.
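
To make the interoperability concrete, here is a minimal hedged sketch (synthetic data; the file name and the two-machine split are illustrative only): train with ``device="cuda"``, save the model, then reload and predict on a CPU-only machine.

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(100, 4)
y = np.random.randint(0, 2, size=100)

# On a GPU-enabled machine: train with the CUDA device.
booster = xgb.train(
    {"device": "cuda", "tree_method": "hist", "objective": "binary:logistic"},
    xgb.DMatrix(X, label=y),
    num_boost_round=10,
)
booster.save_model("model.json")

# Later, on a CPU-only machine: the same model loads and predicts fine.
cpu_booster = xgb.Booster()
cpu_booster.load_model("model.json")
preds = cpu_booster.predict(xgb.DMatrix(X))
```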

 Developer notes
 ===============
-The application may be profiled with annotations by specifying USE_NTVX to cmake. Regions covered by the 'Monitor' class in CUDA code will automatically appear in the nsight profiler when `verbosity` is set to 3.
+The application may be profiled with annotations by specifying ``USE_NTVX`` to cmake. Regions covered by the 'Monitor' class in CUDA code will automatically appear in the nsight profiler when `verbosity` is set to 3.

 **********
 References
@@ -3,10 +3,10 @@ Installation Guide
 ##################

 XGBoost provides binary packages for some language bindings. The binary packages support
-the GPU algorithm (``gpu_hist``) on machines with NVIDIA GPUs. Please note that **training
-with multiple GPUs is only supported for Linux platform**. See :doc:`gpu/index`. Also we
-have both stable releases and nightly builds, see below for how to install them. For
-building from source, visit :doc:`this page </build>`.
+the GPU algorithm (``device=cuda:0``) on machines with NVIDIA GPUs. Please note that
+**training with multiple GPUs is only supported for Linux platform**. See
+:doc:`gpu/index`. Also we have both stable releases and nightly builds, see below for how
+to install them. For building from source, visit :doc:`this page </build>`.

 .. contents:: Contents

@@ -189,7 +189,7 @@ This will check out the latest stable version from the Maven Central.

 For the latest release version number, please check `release page <https://github.com/dmlc/xgboost/releases>`_.

-To enable the GPU algorithm (``tree_method='gpu_hist'``), use artifacts ``xgboost4j-gpu_2.12`` and ``xgboost4j-spark-gpu_2.12`` instead (note the ``gpu`` suffix).
+To enable the GPU algorithm (``device='cuda'``), use artifacts ``xgboost4j-gpu_2.12`` and ``xgboost4j-spark-gpu_2.12`` instead (note the ``gpu`` suffix).

 .. note:: Windows not supported in the JVM package

@@ -325,4 +325,4 @@ The SNAPSHOT JARs are hosted by the XGBoost project. Every commit in the ``maste

 You can browse the file listing of the Maven repository at https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/list.html.

-To enable the GPU algorithm (``tree_method='gpu_hist'``), use artifacts ``xgboost4j-gpu_2.12`` and ``xgboost4j-spark-gpu_2.12`` instead (note the ``gpu`` suffix).
+To enable the GPU algorithm (``device='cuda'``), use artifacts ``xgboost4j-gpu_2.12`` and ``xgboost4j-spark-gpu_2.12`` instead (note the ``gpu`` suffix).
@@ -129,7 +129,7 @@ With parameters and data, you are able to train a booster model.

   booster.saveModel("model.bin");

-* Generaing model dump with feature map
+* Generating model dump with feature map

 .. code-block:: java
@@ -121,7 +121,7 @@ To train a XGBoost model for classification, we need to claim a XGBoostClassifie
       "objective" -> "multi:softprob",
       "num_class" -> 3,
       "num_round" -> 100,
-      "tree_method" -> "gpu_hist",
+      "device" -> "cuda",
       "num_workers" -> 1)

 val featuresNames = schema.fieldNames.filter(name => name != labelName)

@@ -130,15 +130,14 @@ To train a XGBoost model for classification, we need to claim a XGBoostClassifie
   .setFeaturesCol(featuresNames)
   .setLabelCol(labelName)

-The available parameters for training a XGBoost model can be found in :doc:`here </parameter>`.
-Similar to the XGBoost4J-Spark package, in addition to the default set of parameters,
-XGBoost4J-Spark-GPU also supports the camel-case variant of these parameters to be
-consistent with Spark's MLlib naming convention.
+The ``device`` parameter is for informing XGBoost that CUDA devices should be used instead of CPU. Unlike the single-node mode, GPUs are managed by Spark instead of by XGBoost. Therefore, an explicitly specified device ordinal like ``cuda:1`` is not supported.
+
+The available parameters for training a XGBoost model can be found in :doc:`here </parameter>`. Similar to the XGBoost4J-Spark package, in addition to the default set of parameters, XGBoost4J-Spark-GPU also supports the camel-case variant of these parameters to be consistent with Spark's MLlib naming convention.

 Specifically, each parameter in :doc:`this page </parameter>` has its equivalent form in
-XGBoost4J-Spark-GPU with camel case. For example, to set ``max_depth`` for each tree, you can pass
-parameter just like what we did in the above code snippet (as ``max_depth`` wrapped in a Map), or
-you can do it through setters in XGBoostClassifer:
+XGBoost4J-Spark-GPU with camel case. For example, to set ``max_depth`` for each tree, you
+can pass the parameter just like what we did in the above code snippet (as ``max_depth``
+wrapped in a Map), or you can do it through setters in XGBoostClassifier:

 .. code-block:: scala
@@ -390,39 +390,6 @@ Then we can load this model with single node Python XGBoost:

   bst = xgb.Booster({'nthread': 4})
   bst.load_model(nativeModelPath)

-.. note:: Using HDFS and S3 for exporting the models with nativeBooster.saveModel()
-
-  When interacting with other language bindings, XGBoost also supports saving-models-to and loading-models-from file systems other than the local one. You can use HDFS and S3 by prefixing the path with ``hdfs://`` and ``s3://`` respectively. However, for this capability, you must do **one** of the following:
-
-  1. Build XGBoost4J-Spark with the steps described in :ref:`here <install_jvm_packages>`, but turning `USE_HDFS <https://github.com/dmlc/xgboost/blob/e939192978a0c152ad7b49b744630e99d54cffa8/jvm-packages/create_jni.py#L18>`_ (or USE_S3, etc. in the same place) switch on. With this approach, you can reuse the above code example by replacing "nativeModelPath" with a HDFS path.
-
-     - However, if you build with USE_HDFS, etc. you have to ensure that the involved shared object file, e.g. libhdfs.so, is put in the LIBRARY_PATH of your cluster. To avoid the complicated cluster environment configuration, choose the other option.
-
-  2. Use bindings of HDFS, S3, etc. to pass model files around. Here are the steps (taking HDFS as an example):
-
-     - Create a new file with
-
-       .. code-block:: scala
-
-         val outputStream = fs.create("hdfs_path")
-
-       where "fs" is an instance of `org.apache.hadoop.fs.FileSystem <https://hadoop.apache.org/docs/stable/api/org/apache/hadoop/fs/FileSystem.html>`_ class in Hadoop.
-
-     - Pass the returned OutputStream in the first step to nativeBooster.saveModel():
-
-       .. code-block:: scala
-
-         xgbClassificationModel.nativeBooster.saveModel(outputStream)
-
-     - Download file in other languages from HDFS and load with the pre-built (without the requirement of libhdfs.so) version of XGBoost. (The function "download_from_hdfs" is a helper function to be implemented by the user)
-
-       .. code-block:: python
-
-         import xgboost as xgb
-         bst = xgb.Booster({'nthread': 4})
-         local_path = download_from_hdfs("hdfs_path")
-         bst.load_model(local_path)
-
 .. note:: Consistency issue between XGBoost4J-Spark and other bindings

   There is a consistency issue between XGBoost4J-Spark and other language bindings of XGBoost.
@@ -34,6 +34,20 @@ General Parameters

   - Which booster to use. Can be ``gbtree``, ``gblinear`` or ``dart``; ``gbtree`` and ``dart`` use tree based models while ``gblinear`` uses linear functions.

+* ``device`` [default= ``cpu``]
+
+  .. versionadded:: 2.0.0
+
+  - Device for XGBoost to run. User can set it to one of the following values:
+
+    + ``cpu``: Use CPU.
+    + ``cuda``: Use a GPU (CUDA device).
+    + ``cuda:<ordinal>``: ``<ordinal>`` is an integer that specifies the ordinal of the GPU (which GPU you want to use if you have more than one device).
+    + ``gpu``: Default GPU device selection from the list of available and supported devices. Only ``cuda`` devices are supported currently.
+    + ``gpu:<ordinal>``: Default GPU device selection from the list of available and supported devices. Only ``cuda`` devices are supported currently.
+
+  For more information about GPU acceleration, see :doc:`/gpu/index`. In distributed environments, ordinal selection is handled by distributed frameworks instead of XGBoost. As a result, using ``cuda:<ordinal>`` will result in an error. Use ``cuda`` instead.
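
A small sketch of how the new parameter is typically passed, for illustration only; ``cuda:1`` assumes a machine with at least two GPUs, and the scikit-learn dataset is just placeholder data.

```python
import xgboost as xgb
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=200, n_features=8, random_state=0)

# Native interface: device goes into the parameter dict.
booster = xgb.train(
    {"device": "cuda", "tree_method": "hist", "objective": "binary:logistic"},
    xgb.DMatrix(X, label=y),
    num_boost_round=5,
)

# Scikit-learn interface: pick the second GPU explicitly with an ordinal.
clf = xgb.XGBClassifier(device="cuda:1", tree_method="hist")
clf.fit(X, y)
```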

 * ``verbosity`` [default=1]

   - Verbosity of printing messages. Valid values are 0 (silent), 1 (warning), 2 (info), 3
@@ -44,7 +58,7 @@ General Parameters
 * ``validate_parameters`` [default to ``false``, except for Python, R and CLI interface]

   - When set to True, XGBoost will perform validation of input parameters to check whether
-    a parameter is used or not.
+    a parameter is used or not. A warning is emitted when there's an unknown parameter.

 * ``nthread`` [default to maximum number of threads available if not set]
@@ -55,10 +69,6 @@ General Parameters

   - Flag to disable default metric. Set to 1 or ``true`` to disable.

-* ``num_feature`` [set automatically by XGBoost, no need to be set by user]
-
-  - Feature dimension used in boosting, set to maximum dimension of the feature
-
 Parameters for Tree Booster
 ===========================
 * ``eta`` [default=0.3, alias: ``learning_rate``]
@@ -99,7 +109,7 @@ Parameters for Tree Booster
   - ``gradient_based``: the selection probability for each training instance is proportional to the
     *regularized absolute value* of gradients (more specifically, :math:`\sqrt{g^2+\lambda h^2}`).
     ``subsample`` may be set to as low as 0.1 without loss of model accuracy. Note that this
-    sampling method is only supported when ``tree_method`` is set to ``gpu_hist``; other tree
+    sampling method is only supported when ``tree_method`` is set to ``hist`` and the device is ``cuda``; other tree
     methods only support ``uniform`` sampling.
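
As a concrete illustration of the constraint above, a hedged configuration sketch; the data and parameter values are arbitrary and only show how the pieces fit together.

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(1000, 20)
y = np.random.rand(1000)

params = {
    # gradient_based sampling is only available for the GPU hist implementation,
    # so both of these settings are required.
    "tree_method": "hist",
    "device": "cuda",
    "sampling_method": "gradient_based",
    # With gradient-based sampling, subsample can be pushed very low.
    "subsample": 0.1,
    "objective": "reg:squarederror",
}
booster = xgb.train(params, xgb.DMatrix(X, label=y), num_boost_round=10)
```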

 * ``colsample_bytree``, ``colsample_bylevel``, ``colsample_bynode`` [default=1]
@@ -131,26 +141,15 @@ Parameters for Tree Booster
 * ``tree_method`` string [default= ``auto``]

   - The tree construction algorithm used in XGBoost. See description in the `reference paper <http://arxiv.org/abs/1603.02754>`_ and :doc:`treemethod`.
-  - XGBoost supports ``approx``, ``hist`` and ``gpu_hist`` for distributed training. Experimental support for external memory is available for ``approx`` and ``gpu_hist``.

-  - Choices: ``auto``, ``exact``, ``approx``, ``hist``, ``gpu_hist``, this is a
-    combination of commonly used updaters. For other updaters like ``refresh``, set the
-    parameter ``updater`` directly.
+  - Choices: ``auto``, ``exact``, ``approx``, ``hist``, this is a combination of commonly
+    used updaters. For other updaters like ``refresh``, set the parameter ``updater``
+    directly.

-    - ``auto``: Use heuristic to choose the fastest method.
-
-      - For small dataset, exact greedy (``exact``) will be used.
-      - For larger dataset, approximate algorithm (``approx``) will be chosen. It's
-        recommended to try ``hist`` and ``gpu_hist`` for higher performance with large
-        dataset. (``gpu_hist``) has support for ``external memory``.
-      - Because old behavior is always use exact greedy in single machine, user will get a
-        message when approximate algorithm is chosen to notify this choice.
+    - ``auto``: Same as the ``hist`` tree method.
     - ``exact``: Exact greedy algorithm. Enumerates all split candidates.
     - ``approx``: Approximate greedy algorithm using quantile sketch and gradient histogram.
     - ``hist``: Faster histogram optimized approximate greedy algorithm.
-    - ``gpu_hist``: GPU implementation of ``hist`` algorithm.

 * ``scale_pos_weight`` [default=1]
@@ -163,7 +162,8 @@ Parameters for Tree Booster
   - ``grow_colmaker``: non-distributed column-based construction of trees.
   - ``grow_histmaker``: distributed tree construction with row-based data splitting based on global proposal of histogram counting.
   - ``grow_quantile_histmaker``: Grow tree using quantized histogram.
-  - ``grow_gpu_hist``: Grow tree with GPU.
+  - ``grow_gpu_hist``: Enabled when ``tree_method`` is set to ``hist`` along with ``device=cuda``.
+  - ``grow_gpu_approx``: Enabled when ``tree_method`` is set to ``approx`` along with ``device=cuda``.
   - ``sync``: synchronizes trees in all distributed nodes.
   - ``refresh``: refreshes tree's statistics and/or leaf values based on the current data. Note that no random subsampling of data rows is performed.
   - ``prune``: prunes the splits where loss < min_split_loss (or gamma) and nodes that have depth greater than ``max_depth``.
@@ -183,7 +183,7 @@ Parameters for Tree Booster
 * ``grow_policy`` [default= ``depthwise``]

   - Controls a way new nodes are added to the tree.
-  - Currently supported only if ``tree_method`` is set to ``hist``, ``approx`` or ``gpu_hist``.
+  - Currently supported only if ``tree_method`` is set to ``hist`` or ``approx``.
   - Choices: ``depthwise``, ``lossguide``

     - ``depthwise``: split at nodes closest to the root.
@@ -195,22 +195,10 @@ Parameters for Tree Booster

 * ``max_bin``, [default=256]

-  - Only used if ``tree_method`` is set to ``hist``, ``approx`` or ``gpu_hist``.
+  - Only used if ``tree_method`` is set to ``hist`` or ``approx``.
   - Maximum number of discrete bins to bucket continuous features.
   - Increasing this number improves the optimality of splits at the cost of higher computation time.

-* ``predictor``, [default= ``auto``]
-
-  - The type of predictor algorithm to use. Provides the same results but allows the use of GPU or CPU.
-
-    - ``auto``: Configure predictor based on heuristics.
-    - ``cpu_predictor``: Multicore CPU prediction algorithm.
-    - ``gpu_predictor``: Prediction using GPU. Used when ``tree_method`` is ``gpu_hist``.
-      When ``predictor`` is set to default value ``auto``, the ``gpu_hist`` tree method is
-      able to provide GPU based prediction without copying training data to GPU memory.
-      If ``gpu_predictor`` is explicitly specified, then all data is copied into GPU, only
-      recommended for performing prediction tasks.
-
 * ``num_parallel_tree``, [default=1]

   - Number of parallel trees constructed during each iteration. This option is used to support boosted random forest.
@@ -238,6 +226,15 @@ Parameters for Tree Booster
   - ``one_output_per_tree``: One model for each target.
   - ``multi_output_tree``: Use multi-target trees.
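
These option values belong to the multi-target training strategy parameter (assumed here to be ``multi_strategy``, as introduced in the 2.0 Python package; the hunk itself only shows the option values). A rough sketch of selecting the two strategies under that assumption, with made-up data:

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(200, 10)
Y = np.random.rand(200, 3)  # three regression targets

# One model per target (the default strategy).
per_target = xgb.XGBRegressor(tree_method="hist", multi_strategy="one_output_per_tree")
per_target.fit(X, Y)

# A single tree produces all targets at once.
multi_tree = xgb.XGBRegressor(tree_method="hist", multi_strategy="multi_output_tree")
multi_tree.fit(X, Y)
```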

+* ``max_cached_hist_node``, [default = 65536]
+
+  Maximum number of cached nodes for CPU histogram.
+
+  .. versionadded:: 2.0.0
+
+  - For most of the cases this parameter should not be set except for growing deep trees
+    on CPU.

 .. _cat-param:

 Parameters for Categorical Feature
@@ -332,7 +329,7 @@ Parameters for Linear Booster (``booster=gblinear``)
   - Choice of algorithm to fit linear model

   - ``shotgun``: Parallel coordinate descent algorithm based on shotgun algorithm. Uses 'hogwild' parallelism and therefore produces a nondeterministic solution on each run.
-  - ``coord_descent``: Ordinary coordinate descent algorithm. Also multithreaded but still produces a deterministic solution.
+  - ``coord_descent``: Ordinary coordinate descent algorithm. Also multithreaded but still produces a deterministic solution. When the ``device`` parameter is set to ``cuda`` or ``gpu``, a GPU variant will be used.

 * ``feature_selector`` [default= ``cyclic``]
@@ -357,7 +354,7 @@ Specify the learning task and the corresponding learning objective. The objectiv

 - ``reg:squarederror``: regression with squared loss.
 - ``reg:squaredlogerror``: regression with squared log loss :math:`\frac{1}{2}[\log(pred + 1) - \log(label + 1)]^2`. All input labels are required to be greater than -1. Also, see metric ``rmsle`` for possible issue with this objective.
-- ``reg:logistic``: logistic regression.
+- ``reg:logistic``: logistic regression, output probability.
 - ``reg:pseudohubererror``: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
 - ``reg:absoluteerror``: Regression with L1 error. When tree model is used, leaf value is refreshed after tree construction. If used in distributed training, the leaf value is calculated as the mean value from all workers, which is not guaranteed to be optimal.
Some files were not shown because too many files have changed in this diff.