Compare commits
151 Commits
| SHA1 |
|---|
| 8aaabce7c9 |
| 14543176d1 |
| afa6e086cc |
| 636ab6b522 |
| 6daa6ee4e0 |
| 4979991d5b |
| 02faddc5f3 |
| 844d7c1d5b |
| 3728855ce9 |
| ef26bc45bf |
| 660be66207 |
| 92913aaf7f |
| e4f5b6c84f |
| f27b6f9ba6 |
| c355ab65ed |
| 564b22cee5 |
| a734f52807 |
| 73142041b9 |
| 9c1103e06c |
| fcbedcedf8 |
| 29a4cfe400 |
| 397d8f0ee7 |
| b809f5d8b8 |
| ccd30e4491 |
| b2827a80e1 |
| d6d1035950 |
| e1f22baf8c |
| c245eb8755 |
| 93df871c8c |
| c69a19e2b1 |
| cfee9fae91 |
| bb29ce2818 |
| a2d86b8e4b |
| e268fb0093 |
| 6a169cd41a |
| 468b1594d3 |
| 0676a19e70 |
| ec02f40d42 |
| 8b04736b81 |
| ca4e05660e |
| a2f54963b6 |
| 1b1969f20d |
| 2809fb8b6f |
| c90119eb67 |
| 88b64c8162 |
| 04f69b43e6 |
| 449ab79e0c |
| b56c902841 |
| a3db79df22 |
| 093e2227e3 |
| 4a0c8ef237 |
| 1334aca437 |
| 866a477319 |
| bd653fad4c |
| 7d52c0b8c2 |
| dc2950fd90 |
| 6671b42dd4 |
| ad826e913f |
| 8bc595ea1e |
| a1085396e2 |
| 9097e8f0d9 |
| c362125d7b |
| 0012f2ef93 |
| 30e94ddd04 |
| 15800107ad |
| a9313802ea |
| 5fc5ec539d |
| 939973630d |
| 86beb68ce8 |
| 459b175dc6 |
| c218d8ffbf |
| d0b86c75d9 |
| 29c6ad943a |
| e86030c360 |
| babcb996e7 |
| 15f40e51e9 |
| 6601a641d7 |
| 7f980e9f83 |
| 27a8e36fc3 |
| 13b10a6370 |
| 780de49ddb |
| 4942da64ae |
| 7146b91d5a |
| dcf439932a |
| 1de36cdf1e |
| d2231fc840 |
| cd7d6f7d59 |
| 4b7e2b7bff |
| abca9908ba |
| 3cf665d3ec |
| 760d5d0c3c |
| 8ca06ab329 |
| b51124c158 |
| 761a5dbdfc |
| 21b671aa06 |
| 668e432e2d |
| fc88105620 |
| ab7a46a1a4 |
| b745b7acce |
| bb8c8df39d |
| 45a97ddf32 |
| 3ad4333b0e |
| 7a99f8f27f |
| a931589c96 |
| a38e7bd19c |
| 0dd97c206b |
| 1ba6706167 |
| 8d06878bf9 |
| 9775da02d9 |
| 5dc8e894c9 |
| 71a8b8c65a |
| 1b97eaf7a7 |
| b81f8cbbc0 |
| 2ba8c13b69 |
| 9a5efffebe |
| 2d76d40dfd |
| a461a9a90a |
| 0fd455e162 |
| f2b8cd2922 |
| e0509b3307 |
| b0ed3f0a66 |
| 655cf17b60 |
| 70a91ec3ba |
| cfae247231 |
| d6b31df449 |
| 7ac7e8778f |
| 8aa8ef1031 |
| bc96ceb8b2 |
| b2b2c4e231 |
| 9f77c18b0d |
| 0110754a76 |
| e433a379e4 |
| 7e32af5c21 |
| ed2465cce4 |
| 8ca9744b07 |
| c35cdecddd |
| 24ad9dec0b |
| 911a902835 |
| 29eeea709a |
| 2e0067e790 |
| 94828a7c0c |
| 84e395d91e |
| 595a00466d |
| e4b74c4d22 |
| c74216f22c |
| 71e7e3b96f |
| a5cc112eea |
| ed0216642f |
| 856b81c727 |
| d7b45fbcaf |
| fa26313feb |
38 .clang-tidy

```diff
@@ -1,21 +1,21 @@
 Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
 CheckOptions:
   - { key: readability-identifier-naming.ClassCase, value: CamelCase }
   - { key: readability-identifier-naming.StructCase, value: CamelCase }
   - { key: readability-identifier-naming.TypeAliasCase, value: CamelCase }
   - { key: readability-identifier-naming.TypedefCase, value: CamelCase }
   - { key: readability-identifier-naming.TypeTemplateParameterCase, value: CamelCase }
   - { key: readability-identifier-naming.MemberCase, value: lower_case }
   - { key: readability-identifier-naming.PrivateMemberSuffix, value: '_' }
   - { key: readability-identifier-naming.ProtectedMemberSuffix, value: '_' }
   - { key: readability-identifier-naming.EnumCase, value: CamelCase }
   - { key: readability-identifier-naming.EnumConstant, value: CamelCase }
   - { key: readability-identifier-naming.EnumConstantPrefix, value: k }
   - { key: readability-identifier-naming.GlobalConstantCase, value: CamelCase }
   - { key: readability-identifier-naming.GlobalConstantPrefix, value: k }
   - { key: readability-identifier-naming.StaticConstantCase, value: CamelCase }
   - { key: readability-identifier-naming.StaticConstantPrefix, value: k }
   - { key: readability-identifier-naming.ConstexprVariableCase, value: CamelCase }
   - { key: readability-identifier-naming.ConstexprVariablePrefix, value: k }
   - { key: readability-identifier-naming.FunctionCase, value: CamelCase }
   - { key: readability-identifier-naming.NamespaceCase, value: lower_case }
```
1 .github/FUNDING.yml (vendored, new file)

```diff
@@ -0,0 +1 @@
+open_collective: xgboost
```
4 .gitignore (vendored)

```diff
@@ -65,7 +65,6 @@ nb-configuration*
 .pydevproject
 .settings/
 build
 config.mk
 /xgboost
 *.data
 build_plugin
@@ -101,3 +100,6 @@ R-package/src/Makevars
 .idea
 *.iml
 /cmake-build-debug/
+
+# GDB
+.gdb_history
```
10 .travis.yml

```diff
@@ -6,7 +6,7 @@ os:
   - linux
   - osx
 
-osx_image: xcode10.3
+osx_image: xcode10.1
 dist: bionic
 
 # Use Build Matrix to do lint and build seperately
@@ -21,6 +21,10 @@ env:
     # cmake test
     - TASK=cmake_test
 
+  global:
+    - secure: "PR16i9F8QtNwn99C5NDp8nptAS+97xwDtXEJJfEiEVhxPaaRkOp0MPWhogCaK0Eclxk1TqkgWbdXFknwGycX620AzZWa/A1K3gAs+GrpzqhnPMuoBJ0Z9qxXTbSJvCyvMbYwVrjaxc/zWqdMU8waWz8A7iqKGKs/SqbQ3rO6v7c="
+    - secure: "dAGAjBokqm/0nVoLMofQni/fWIBcYSmdq4XvCBX1ZAMDsWnuOfz/4XCY6h2lEI1rVHZQ+UdZkc9PioOHGPZh5BnvE49/xVVWr9c4/61lrDOlkD01ZjSAeoV0fAZq+93V/wPl4QV+MM+Sem9hNNzFSbN5VsQLAiWCSapWsLdKzqA="
+
   matrix:
     exclude:
       - os: linux
@@ -44,7 +48,7 @@ addons:
     update: true
 
 before_install:
-  - source dmlc-core/scripts/travis/travis_setup_env.sh
+  - source tests/travis/travis_setup_env.sh
   - if [ "${TASK}" != "python_sdist_test" ]; then export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package; fi
   - echo "MAVEN_OPTS='-Xmx2g -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m -Dorg.slf4j.simpleLogger.defaultLogLevel=error'" > ~/.mavenrc
@@ -60,7 +64,7 @@ cache:
   - ${HOME}/.cache/pip
 
 before_cache:
-  - dmlc-core/scripts/travis/travis_before_cache.sh
+  - tests/travis/travis_before_cache.sh
 
 after_failure:
   - tests/travis/travis_after_failure.sh
```
CMakeLists.txt

```diff
@@ -1,8 +1,10 @@
-cmake_minimum_required(VERSION 3.12)
-project(xgboost LANGUAGES CXX C VERSION 1.0.0)
+cmake_minimum_required(VERSION 3.13)
+project(xgboost LANGUAGES CXX C VERSION 1.1.0)
 include(cmake/Utils.cmake)
 list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
 cmake_policy(SET CMP0022 NEW)
 cmake_policy(SET CMP0079 NEW)
+cmake_policy(SET CMP0063 NEW)
 
 if ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
   cmake_policy(SET CMP0077 NEW)
@@ -23,6 +25,7 @@ set_default_configuration_release()
 #-- Options
 option(BUILD_C_DOC "Build documentation for C APIs using Doxygen." OFF)
 option(USE_OPENMP "Build with OpenMP support." ON)
+option(BUILD_STATIC_LIB "Build static library" OFF)
 ## Bindings
 option(JVM_BINDINGS "Build JVM bindings" OFF)
 option(R_LIB "Build shared library for R package" OFF)
@@ -34,6 +37,7 @@ option(USE_DMLC_GTEST "Use google tests bundled with dmlc-core submodule" OFF)
 option(USE_NVTX "Build with cuda profiling annotations. Developers only." OFF)
 set(NVTX_HEADER_DIR "" CACHE PATH "Path to the stand-alone nvtx header")
 option(RABIT_MOCK "Build rabit with mock" OFF)
+option(HIDE_CXX_SYMBOLS "Build shared library and hide all C++ symbols" OFF)
 ## CUDA
 option(USE_CUDA "Build with GPU acceleration" OFF)
 option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
@@ -93,6 +97,8 @@ if (USE_CUDA)
   message(STATUS "CUDA GEN_CODE: ${GEN_CODE}")
 endif (USE_CUDA)
 
+find_package(Threads REQUIRED)
+
 if (USE_OPENMP)
   if (APPLE)
     # Require CMake 3.16+ on Mac OSX, as previous versions of CMake had trouble locating
@@ -122,6 +128,16 @@ if (RABIT_MOCK)
 else()
   list(APPEND LINKED_LIBRARIES_PRIVATE rabit)
 endif(RABIT_MOCK)
+foreach(lib rabit rabit_base rabit_empty rabit_mock rabit_mock_static)
+  # Explicitly link dmlc to rabit, so that configured header (build_config.h)
+  # from dmlc is correctly applied to rabit.
+  if (TARGET ${lib})
+    target_link_libraries(${lib} dmlc ${CMAKE_THREAD_LIBS_INIT})
+    if (HIDE_CXX_SYMBOLS) # Hide all C++ symbols from Rabit
+      set_target_properties(${lib} PROPERTIES CXX_VISIBILITY_PRESET hidden)
+    endif (HIDE_CXX_SYMBOLS)
+  endif (TARGET ${lib})
+endforeach()
 
 # Exports some R specific definitions and objects
 if (R_LIB)
@@ -129,12 +145,25 @@ if (R_LIB)
 endif (R_LIB)
 
 # core xgboost
+list(APPEND LINKED_LIBRARIES_PRIVATE Threads::Threads ${CMAKE_THREAD_LIBS_INIT})
 add_subdirectory(${xgboost_SOURCE_DIR}/plugin)
 add_subdirectory(${xgboost_SOURCE_DIR}/src)
+target_link_libraries(objxgboost PUBLIC dmlc)
 set(XGBOOST_OBJ_SOURCES "${XGBOOST_OBJ_SOURCES};$<TARGET_OBJECTS:objxgboost>")
 
-#-- Shared library
-add_library(xgboost SHARED ${XGBOOST_OBJ_SOURCES})
+#-- library
+if (BUILD_STATIC_LIB)
+  add_library(xgboost STATIC ${XGBOOST_OBJ_SOURCES})
+else (BUILD_STATIC_LIB)
+  add_library(xgboost SHARED ${XGBOOST_OBJ_SOURCES})
+endif (BUILD_STATIC_LIB)
+
+#-- Hide all C++ symbols
+if (HIDE_CXX_SYMBOLS)
+  set_target_properties(objxgboost PROPERTIES CXX_VISIBILITY_PRESET hidden)
+  set_target_properties(xgboost PROPERTIES CXX_VISIBILITY_PRESET hidden)
+endif (HIDE_CXX_SYMBOLS)
 
 target_include_directories(xgboost
   INTERFACE
     $<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include>
```
CONTRIBUTORS.md

```diff
@@ -16,8 +16,8 @@ The Project Management Committee(PMC) consists group of active committers that m
   - Nan is a software engineer in Uber. He contributed mostly in JVM packages.
 * [Jiaming Yuan](https://github.com/trivialfis)
   - Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
-* [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
-  - Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
+* [Hyunsu Cho](http://hyunsu-cho.io/), NVIDIA
+  - Hyunsu is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
 * [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
   - Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
 * [Hongliang Liu](https://github.com/phunterlau)
```
83 Jenkinsfile (vendored)

```diff
@@ -63,6 +63,7 @@ pipeline {
         parallel ([
           'build-cpu': { BuildCPU() },
           'build-cpu-rabit-mock': { BuildCPUMock() },
+          'build-cpu-non-omp': { BuildCPUNonOmp() },
           'build-gpu-cuda9.0': { BuildCUDA(cuda_version: '9.0') },
           'build-gpu-cuda10.0': { BuildCUDA(cuda_version: '10.0') },
           'build-gpu-cuda10.1': { BuildCUDA(cuda_version: '10.1') },
@@ -88,13 +89,23 @@
           'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '2.4.3') },
           'test-jvm-jdk11': { CrossTestJVMwithJDK(jdk_version: '11') },
           'test-jvm-jdk12': { CrossTestJVMwithJDK(jdk_version: '12') },
           'test-r-3.4.4': { TestR(use_r35: false) },
           'test-r-3.5.3': { TestR(use_r35: true) }
         ])
       }
       milestone ordinal: 4
     }
   }
+  stage('Jenkins Linux: Deploy') {
+    agent none
+    steps {
+      script {
+        parallel ([
+          'deploy-jvm-packages': { DeployJVMPackages(spark_version: '2.4.3') }
+        ])
+      }
+      milestone ordinal: 5
+    }
+  }
 }
}
@@ -119,7 +130,7 @@ def ClangTidy() {
   echo "Running clang-tidy job..."
   def container_type = "clang_tidy"
   def docker_binary = "docker"
-  def dockerArgs = "--build-arg CUDA_VERSION=9.2"
+  def dockerArgs = "--build-arg CUDA_VERSION=10.1"
   sh """
   ${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} python3 tests/ci_build/tidy.py
   """
@@ -176,17 +187,22 @@ def BuildCPU() {
   def container_type = "cpu"
   def docker_binary = "docker"
   sh """
+  ${dockerRun} ${container_type} ${docker_binary} rm -fv dmlc-core/include/dmlc/build_config_default.h
+  # This step is not necessary, but here we include it, to ensure that DMLC_CORE_USE_CMAKE flag is correctly propagated
+  # We want to make sure that we use the configured header build/dmlc/build_config.h instead of include/dmlc/build_config_default.h.
+  # See discussion at https://github.com/dmlc/xgboost/issues/5510
   ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh
   ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
   """
   // Sanitizer test
   def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer -e ASAN_OPTIONS=symbolize=1 -e UBSAN_OPTIONS=print_stacktrace=1:log_path=ubsan_error.log --cap-add SYS_PTRACE'"
-  def docker_args = "--build-arg CMAKE_VERSION=3.12"
   sh """
-  ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \
+  ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \
     -DCMAKE_BUILD_TYPE=Debug -DSANITIZER_PATH=/usr/lib/x86_64-linux-gnu/
   ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
   """
 
   stash name: 'xgboost_cli', includes: 'xgboost'
   deleteDir()
 }
}
@@ -200,12 +216,28 @@ def BuildCPUMock() {
   sh """
   ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_mock_cmake.sh
   """
   echo 'Stashing rabit C++ test executable (xgboost)...'
   stash name: 'xgboost_rabit_tests', includes: 'xgboost'
   deleteDir()
 }
}

+def BuildCPUNonOmp() {
+  node('linux && cpu') {
+    unstash name: 'srcs'
+    echo "Build CPU without OpenMP"
+    def container_type = "cpu"
+    def docker_binary = "docker"
+    sh """
+    ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_OPENMP=OFF
+    """
+    echo "Running Non-OpenMP C++ test..."
+    sh """
+    ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
+    """
+    deleteDir()
+  }
+}
@@ -215,9 +247,9 @@ def BuildCUDA(args) {
   def docker_binary = "docker"
   def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
   sh """
-  ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON
+  ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON -DHIDE_CXX_SYMBOLS=ON
   ${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
-  ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux1_x86_64
+  ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux2010_x86_64
   """
   // Stash wheel for CUDA 9.0 target
   if (args.cuda_version == '9.0') {
@@ -244,7 +276,7 @@ def BuildJVMPackages(args) {
   ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_packages.sh ${args.spark_version}
   """
   echo 'Stashing XGBoost4J JAR...'
-  stash name: 'xgboost4j_jar', includes: 'jvm-packages/xgboost4j/target/*.jar,jvm-packages/xgboost4j-spark/target/*.jar,jvm-packages/xgboost4j-example/target/*.jar'
+  stash name: 'xgboost4j_jar', includes: "jvm-packages/xgboost4j/target/*.jar,jvm-packages/xgboost4j-spark/target/*.jar,jvm-packages/xgboost4j-example/target/*.jar"
   deleteDir()
 }
}
@@ -268,11 +300,13 @@ def TestPythonCPU() {
 node('linux && cpu') {
   unstash name: 'xgboost_whl_cuda9'
   unstash name: 'srcs'
+  unstash name: 'xgboost_cli'
   echo "Test Python CPU"
   def container_type = "cpu"
   def docker_binary = "docker"
   sh """
   ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu
+  ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu-py35
   """
   deleteDir()
 }
@@ -292,19 +326,25 @@ def TestPythonGPU(args) {
   sh """
   ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu
   """
+  if (args.cuda_version != '9.0') {
+    echo "Running tests with cuDF..."
+    sh """
+    ${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu-cudf
+    """
+  }
 } else {
   echo "Using a single GPU"
   sh """
   ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh gpu
   """
+  if (args.cuda_version != '9.0') {
+    echo "Running tests with cuDF..."
+    sh """
+    ${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh cudf
+    """
+  }
 }
-// For CUDA 10.0 target, run cuDF tests too
-if (args.cuda_version == '10.0') {
-  echo "Running tests with cuDF..."
-  sh """
-  ${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh cudf
-  """
-}
 deleteDir()
}
@@ -379,3 +419,18 @@ def TestR(args) {
   deleteDir()
 }
}

+def DeployJVMPackages(args) {
+  node('linux && cpu') {
+    unstash name: 'srcs'
+    if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
+      echo 'Deploying to xgboost-maven-repo S3 repo...'
+      def container_type = "jvm"
+      def docker_binary = "docker"
+      sh """
+      ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/deploy_jvm_packages.sh ${args.spark_version}
+      """
+    }
+    deleteDir()
+  }
+}
```
Jenkinsfile-win64

```diff
@@ -96,6 +96,7 @@ def BuildWin64() {
   s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
   echo 'Stashing C++ test executable (testxgboost)...'
   stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost.exe'
+  stash name: 'xgboost_cli', includes: 'xgboost.exe'
   deleteDir()
 }
}
@@ -104,12 +105,17 @@ def TestWin64CPU() {
 node('win64 && cpu') {
   unstash name: 'srcs'
   unstash name: 'xgboost_whl'
+  unstash name: 'xgboost_cli'
   echo "Test Win64 CPU"
   echo "Installing Python wheel..."
   bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
   bat """
   conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
   """
+  echo "Installing Python dependencies..."
+  bat """
+  conda activate && conda upgrade scikit-learn pandas numpy
+  """
   echo "Running Python tests..."
   bat "conda activate && python -m pytest -v -s --fulltrace tests\\python"
   bat "conda activate && python -m pip uninstall -y xgboost"
@@ -131,6 +137,10 @@ def TestWin64GPU(args) {
   bat """
   conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
   """
+  echo "Installing Python dependencies..."
+  bat """
+  conda activate && conda upgrade scikit-learn pandas numpy
+  """
   echo "Running Python tests..."
   bat """
   conda activate && python -m pytest -v -s --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu
```
140 Makefile

```diff
@@ -1,11 +1,3 @@
-ifndef config
-ifneq ("$(wildcard ./config.mk)","")
-	config = config.mk
-else
-	config = make/config.mk
-endif
-endif
-
 ifndef DMLC_CORE
 	DMLC_CORE = dmlc-core
 endif
@@ -30,16 +22,6 @@ ifndef MAKE_OK
 endif
 $(warning MAKE [$(MAKE)] - $(if $(MAKE_OK),checked OK,PROBLEM))
 
-ifeq ($(OS), Windows_NT)
-	UNAME="Windows"
-else
-	UNAME=$(shell uname)
-endif
-
-include $(config)
-ifeq ($(USE_OPENMP), 0)
-	export NO_OPENMP = 1
-endif
 include $(DMLC_CORE)/make/dmlc.mk
 
 # set compiler defaults for OSX versus *nix
@@ -62,75 +44,21 @@ export CXX = g++
 endif
 endif
 
-export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS)
-export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
-CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include
-#java include path
-export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java
-
-ifeq ($(TEST_COVER), 1)
-	CFLAGS += -g -O0 -fprofile-arcs -ftest-coverage
-else
-	CFLAGS += -O3 -funroll-loops
-ifeq ($(USE_SSE), 1)
-	CFLAGS += -msse2
-endif
-endif
-
 ifndef LINT_LANG
 	LINT_LANG= "all"
 endif
 
-ifeq ($(UNAME), Windows)
-	XGBOOST_DYLIB = lib/xgboost.dll
-	JAVAINCFLAGS += -I${JAVA_HOME}/include/win32
-else
-ifeq ($(UNAME), Darwin)
-	XGBOOST_DYLIB = lib/libxgboost.dylib
-	CFLAGS += -fPIC
-else
-	XGBOOST_DYLIB = lib/libxgboost.so
-	CFLAGS += -fPIC
-endif
-endif
-
-ifeq ($(UNAME), Linux)
-	LDFLAGS += -lrt
-	JAVAINCFLAGS += -I${JAVA_HOME}/include/linux
-endif
-
-ifeq ($(UNAME), Darwin)
-	JAVAINCFLAGS += -I${JAVA_HOME}/include/darwin
-endif
-
-OPENMP_FLAGS =
-ifeq ($(USE_OPENMP), 1)
-	OPENMP_FLAGS = -fopenmp
-else
-	OPENMP_FLAGS = -DDISABLE_OPENMP
-endif
-CFLAGS += $(OPENMP_FLAGS)
-
-# specify tensor path
-.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck java pylint
-
-all: lib/libxgboost.a $(XGBOOST_DYLIB) xgboost
-
-$(DMLC_CORE)/libdmlc.a: $(wildcard $(DMLC_CORE)/src/*.cc $(DMLC_CORE)/src/*/*.cc)
-	+ cd $(DMLC_CORE); "$(MAKE)" libdmlc.a config=$(ROOTDIR)/$(config); cd $(ROOTDIR)
-
-$(RABIT)/lib/$(LIB_RABIT): $(wildcard $(RABIT)/src/*.cc)
-	+ cd $(RABIT); "$(MAKE)" lib/$(LIB_RABIT) USE_SSE=$(USE_SSE); cd $(ROOTDIR)
-
-jvm: jvm-packages/lib/libxgboost4j.so
-
-SRC = $(wildcard src/*.cc src/*/*.cc)
-ALL_OBJ = $(patsubst src/%.cc, build/%.o, $(SRC))
-AMALGA_OBJ = amalgamation/xgboost-all0.o
-LIB_DEP = $(DMLC_CORE)/libdmlc.a $(RABIT)/lib/$(LIB_RABIT)
-ALL_DEP = $(filter-out build/cli_main.o, $(ALL_OBJ)) $(LIB_DEP)
-CLI_OBJ = build/cli_main.o
-include tests/cpp/xgboost_test.mk
+.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck
 
 build/%.o: src/%.cc
 	@mkdir -p $(@D)
@@ -141,27 +69,6 @@ build/%.o: src/%.cc
-amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc
-	$(CXX) -c $(CFLAGS) $< -o $@
-
-# Equivalent to lib/libxgboost_all.so
-lib/libxgboost_all.so: $(AMALGA_OBJ) $(LIB_DEP)
-	@mkdir -p $(@D)
-	$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o %.a, $^) $(LDFLAGS)
-
-lib/libxgboost.a: $(ALL_DEP)
-	@mkdir -p $(@D)
-	ar crv $@ $(filter %.o, $?)
-
-lib/xgboost.dll lib/libxgboost.so lib/libxgboost.dylib: $(ALL_DEP)
-	@mkdir -p $(@D)
-	$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o %a, $^) $(LDFLAGS)
-
-jvm-packages/lib/libxgboost4j.so: jvm-packages/xgboost4j/src/native/xgboost4j.cpp $(ALL_DEP)
-	@mkdir -p $(@D)
-	$(CXX) $(CFLAGS) $(JAVAINCFLAGS) -shared -o $@ $(filter %.cpp %.o %.a, $^) $(LDFLAGS)
-
-xgboost: $(CLI_OBJ) $(ALL_DEP)
-	$(CXX) $(CFLAGS) -o $@ $(filter %.o %.a, $^) $(LDFLAGS)
-
 rcpplint:
 	python3 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src
 
@@ -172,16 +79,6 @@ lint: rcpplint
 	python-package/xgboost/src --pylint-rc ${PWD}/python-package/.pylintrc xgboost \
 	${LINT_LANG} include src python-package
 
 pylint:
 	flake8 --ignore E501 python-package
 	flake8 --ignore E501 tests/python
 
-test: $(ALL_TEST)
-	$(ALL_TEST)
-
-check: test
-	./tests/cpp/xgboost_test
-
-ifeq ($(TEST_COVER), 1)
-cover: check
-	@- $(foreach COV_OBJ, $(COVER_OBJ), \
@@ -202,38 +99,9 @@ clean_all: clean
 	cd $(DMLC_CORE); "$(MAKE)" clean; cd $(ROOTDIR)
 	cd $(RABIT); "$(MAKE)" clean; cd $(ROOTDIR)
 
 doxygen:
 	doxygen doc/Doxyfile
 
-# create standalone python tar file.
-pypack: ${XGBOOST_DYLIB}
-	cp ${XGBOOST_DYLIB} python-package/xgboost
-	cd python-package; tar cf xgboost.tar xgboost; cd ..
-
 # create pip source dist (sdist) pack for PyPI
 pippack: clean_all
-	rm -rf xgboost-python
-	# remove symlinked directories in python-package/xgboost
-	rm -rf python-package/xgboost/lib
-	rm -rf python-package/xgboost/dmlc-core
-	rm -rf python-package/xgboost/include
-	rm -rf python-package/xgboost/make
-	rm -rf python-package/xgboost/rabit
-	rm -rf python-package/xgboost/src
-	cp -r python-package xgboost-python
-	cp -r CMakeLists.txt xgboost-python/xgboost/
-	cp -r cmake xgboost-python/xgboost/
-	cp -r plugin xgboost-python/xgboost/
-	cp -r make xgboost-python/xgboost/
-	cp -r src xgboost-python/xgboost/
-	cp -r tests xgboost-python/xgboost/
-	cp -r include xgboost-python/xgboost/
-	cp -r dmlc-core xgboost-python/xgboost/
-	cp -r rabit xgboost-python/xgboost/
-	# Use setup_pip.py instead of setup.py
-	mv xgboost-python/setup_pip.py xgboost-python/setup.py
-	# Build sdist tarball
-	cd xgboost-python; python setup.py sdist; mv dist/*.tar.gz ..; cd ..
+	cd python-package; python setup.py sdist; mv dist/*.tar.gz ..; cd ..
 
 # Script to make a clean installable R package.
 Rpack: clean_all
@@ -254,9 +122,9 @@ Rpack: clean_all
 	cp -r dmlc-core/include xgboost/src/dmlc-core/include
 	cp -r dmlc-core/src xgboost/src/dmlc-core/src
 	cp ./LICENSE xgboost
 	# Modify PKGROOT in Makevars.in
 	cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.in
 	# Configure Makevars.win (Windows-specific Makevars, likely using MinGW)
-	cp xgboost/src/Makevars.in xgboost/src/Makevars.win
+	cat xgboost/src/Makevars.in| sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.win
 	sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CXXFLAGS\)/g' xgboost/src/Makevars.win
```
308 NEWS.md

@@ -3,6 +3,314 @@ XGBoost Change Log

This file records the changes in the xgboost library in reverse chronological order.

## v1.0.0 (2020.02.19)
This release marks a major milestone for the XGBoost project.

### Apache-style governance, contribution policy, and semantic versioning (#4646, #4659)
* Starting with the 1.0.0 release, the XGBoost Project is adopting Apache-style governance. The full community guideline is [available in the doc website](https://xgboost.readthedocs.io/en/release_1.0.0/contrib/community.html). Note that we now have a Project Management Committee (PMC) who will steward the project on a long-term basis. The PMC is also entrusted to run and fund the project's continuous integration (CI) infrastructure (https://xgboost-ci.net).
* We also adopt [semantic versioning](https://semver.org/). See [our release versioning policy](https://xgboost.readthedocs.io/en/release_1.0.0/contrib/release.html).

### Better performance scaling for multi-core CPUs (#4502, #4529, #4716, #4851, #5008, #5107, #5138, #5156)
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). Previous effort #4529 was replaced with a series of pull requests (#5107, #5138, #5156) aimed at achieving the same performance benefits while keeping the C++ codebase legible. The latest performance benchmark results show [up to 5x speedup on Intel CPUs with many cores](https://github.com/dmlc/xgboost/pull/5156#issuecomment-580024413). Note: #5244, which concludes the effort, will become part of the upcoming release 1.1.0.

### Improved installation experience on Mac OSX (#4672, #5074, #5080, #5146, #5240)
* It used to be quite complicated to install XGBoost on Mac OSX. XGBoost uses OpenMP to distribute work among multiple CPU cores, and Mac's default C++ compiler (Apple Clang) does not come with OpenMP. The existing work-around (using another C++ compiler) was complex and prone to fail with cryptic diagnostics (#4933, #4949, #4969).
* Now it only takes two commands to install XGBoost: `brew install libomp` followed by `pip install xgboost`. The installed XGBoost will use all CPU cores.
* Even better, XGBoost is now available from Homebrew: `brew install xgboost`. See Homebrew/homebrew-core#50467.
* Previously, if you installed the XGBoost R package using the command `install.packages('xgboost')`, it could only use a single CPU core and you would experience slow training performance. With the 1.0.0 release, the R package will use all CPU cores out of the box.

### Distributed XGBoost now available on Kubernetes (#4621, #4939)
* Check out the [tutorial for setting up distributed XGBoost on a Kubernetes cluster](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/kubernetes.html).

### Ruby binding for XGBoost (#4856)

### New Native Dask interface for multi-GPU and multi-node scaling (#4473, #4507, #4617, #4819, #4907, #4914, #4941, #4942, #4951, #4973, #5048, #5077, #5144, #5270)
* XGBoost now integrates seamlessly with [Dask](https://dask.org/), a lightweight distributed framework for data processing. Together with the first-class support for cuDF data frames (see below), it is now easier than ever to create end-to-end data pipelines running on one or more NVIDIA GPUs.
* Multi-GPU training with Dask is now up to 20% faster than the previous release (#4914, #4951).
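A minimal sketch of the new Dask interface as shipped in the 1.0 Python package; the local cluster, synthetic data, and parameter values here are illustrative, not taken from the release notes:

```python
import dask.array as da
import xgboost as xgb
from dask.distributed import Client, LocalCluster

if __name__ == "__main__":
    with Client(LocalCluster(n_workers=2, threads_per_worker=2)) as client:
        # Synthetic partitioned data; each chunk becomes a worker-local shard.
        X = da.random.random((100_000, 20), chunks=(10_000, 20))
        y = da.random.random(100_000, chunks=10_000)
        dtrain = xgb.dask.DaskDMatrix(client, X, y)
        # xgb.dask.train returns both the trained booster and the evaluation log.
        output = xgb.dask.train(
            client,
            {"tree_method": "hist", "objective": "reg:squarederror"},
            dtrain,
            num_boost_round=50,
        )
        booster = output["booster"]
        history = output["history"]
```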
### First-class support for cuDF data frames and cuPy arrays (#4737, #4745, #4794, #4850, #4891, #4902, #4918, #4927, #4928, #5053, #5189, #5194, #5206, #5219, #5225)
* [cuDF](https://github.com/rapidsai/cudf) is a data frame library for loading and processing tabular data on NVIDIA GPUs. It provides a Pandas-like API.
* [cuPy](https://github.com/cupy/cupy) implements a NumPy-compatible multi-dimensional array on NVIDIA GPUs.
* Now users can keep the data in GPU memory throughout the end-to-end data pipeline, obviating the need for copying data between main memory and GPU memory.
* XGBoost can accept any data structure that exposes the `__array_interface__` signature, opening the way to support other columnar formats that are compatible with Apache Arrow.
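A minimal sketch of keeping the pipeline on the GPU, assuming a working cudf installation and a hypothetical `train.csv` with a `label` column:

```python
import cudf
import xgboost as xgb

df = cudf.read_csv("train.csv")     # loaded straight into GPU memory
y = df.pop("label")
dtrain = xgb.DMatrix(df, label=y)   # ingested without a round-trip through host memory
bst = xgb.train({"tree_method": "gpu_hist", "objective": "reg:squarederror"},
                dtrain, num_boost_round=100)
```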
### [Feature interaction constraint](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/feature_interaction_constraint.html) is now available with `approx` and `gpu_hist` algorithms (#4534, #4587, #4596, #5034).
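As a rough illustration (random data, arbitrary feature groups), constraints are passed as a string-encoded list of index groups:

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.rand(200, 5), label=rng.rand(200))
# Features 0-1 and 2-4 may only be combined with members of their own
# group along any root-to-leaf path.
params = {
    "tree_method": "approx",
    "interaction_constraints": "[[0, 1], [2, 3, 4]]",
}
bst = xgb.train(params, dtrain, num_boost_round=20)
```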
### Learning to rank is now GPU accelerated (#4873, #5004, #5129)
* Supported ranking objectives: NDCG, Map, Pairwise.
* [Up to 2x improved training performance on GPUs](https://devblogs.nvidia.com/learning-to-rank-with-xgboost-and-gpu/).
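A hedged sketch of GPU-accelerated ranking (the query grouping and sizes below are made up for illustration):

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
X = rng.rand(1000, 10)
relevance = rng.randint(0, 5, size=1000)      # graded relevance labels
dtrain = xgb.DMatrix(X, label=relevance)
dtrain.set_group([100] * 10)                  # 10 queries of 100 documents each
params = {"objective": "rank:ndcg", "tree_method": "gpu_hist", "eta": 0.1}
bst = xgb.train(params, dtrain, num_boost_round=50)
```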
### Enable `gamma` parameter for GPU training (#4874, #4953)
* The `gamma` parameter specifies the minimum loss reduction required to add a new split in a tree. A larger value for `gamma` has the effect of pre-pruning the tree, by making it harder to add splits.
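For instance (synthetic data; the value 1.0 is only illustrative):

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.rand(500, 10), label=rng.rand(500))
# A candidate split is kept only if it reduces the loss by at least
# `gamma`, so larger values pre-prune the tree.
params = {"tree_method": "gpu_hist", "gamma": 1.0, "max_depth": 8}
bst = xgb.train(params, dtrain, num_boost_round=50)
```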
### External memory for GPU training (#4486, #4526, #4747, #4833, #4879, #5014)
* It is now possible to use NVIDIA GPUs even when the size of training data exceeds the available GPU memory. Note that the external memory support for GPU is still experimental. #5093 will further improve performance and will become part of the upcoming release 1.1.0.
* RFC for enabling external memory with GPU algorithms: #4357
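A sketch of the external-memory setup, assuming a LIBSVM-format text file named `train.libsvm` (both the file name and cache prefix are placeholders):

```python
import xgboost as xgb

# The '#' suffix asks XGBoost to page the data from disk through the named
# cache prefix instead of holding everything in memory at once.
dtrain = xgb.DMatrix("train.libsvm#dtrain.cache")
params = {"tree_method": "gpu_hist", "max_depth": 6}
bst = xgb.train(params, dtrain, num_boost_round=100)
```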
### Improve Scikit-Learn interface (#4558, #4842, #4929, #5049, #5151, #5130, #5227)
* Many users of XGBoost enjoy the convenience and breadth of the Scikit-Learn ecosystem. In this release, we revise the Scikit-Learn API of XGBoost (`XGBRegressor`, `XGBClassifier`, and `XGBRanker`) to achieve feature parity with the traditional XGBoost interface (`xgboost.train()`).
* Insert check to validate data shapes.
* Produce an error message if `eval_set` is not a tuple. An error message is better than silently crashing.
* Allow using `numpy.RandomState` object.
* Add `n_jobs` as an alias of `nthread`.
* Roadmap: #5152
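A short sketch exercising the items above (data and hyperparameters are illustrative):

```python
import numpy as np
from xgboost import XGBRegressor

rng = np.random.RandomState(7)
X, y = rng.rand(500, 10), rng.rand(500)
X_val, y_val = rng.rand(100, 10), rng.rand(100)

model = XGBRegressor(
    n_estimators=200,
    n_jobs=4,          # new alias of nthread
    random_state=rng,  # numpy.RandomState objects are now accepted
)
model.fit(
    X, y,
    eval_set=[(X_val, y_val)],  # must be a list of (X, y) tuples
    early_stopping_rounds=10,
    verbose=False,
)
```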
### XGBoost4J-Spark: Redesigning checkpointing mechanism
* RFC is available at #4786
* Clean up checkpoint file after a successful training job (#4754): The current implementation in XGBoost4J-Spark does not clean up the checkpoint file after a successful training job. If the user runs another job with the same checkpointing directory, she will get a wrong model because the second job will re-use the checkpoint file left over from the first job. To prevent this scenario, we propose to always clean up the checkpoint file after every successful training job.
* Avoid Multiple Jobs for Checkpointing (#5082): The current method for checkpointing is to collect the booster produced at the last iteration of each checkpoint interval to the Driver and persist it in HDFS. The major issue with this approach is that it needs to re-perform the data preparation for training if the user did not choose to cache the training dataset. To avoid re-performing data prep, we build external-memory checkpointing in the XGBoost4J layer as well.
* Enable deterministic repartitioning when checkpoint is enabled (#4807): The distributed algorithm for gradient boosting assumes a fixed partition of the training data between multiple iterations. In previous versions, there was no guarantee that the data partition would stay the same, especially when a worker goes down and some data had to be recovered from a previous checkpoint. In this release, we make the data partition deterministic by using the hash value of each data row in computing the partition.

### XGBoost4J-Spark: handle errors thrown by the native code (#4560)
* All core logic of XGBoost is written in C++, so XGBoost4J-Spark internally uses the C++ code via the Java Native Interface (JNI). #4560 adds proper error handling for any errors or exceptions arising from the C++ code, so that the XGBoost Spark application can be torn down in an orderly fashion.

### XGBoost4J-Spark: Refine method to count the number of alive cores (#4858)
* The `SparkParallelismTracker` class ensures that a sufficient number of executor cores are alive. To that end, it is important to query the number of alive cores reliably.

### XGBoost4J: Add `BigDenseMatrix` to store more than `Integer.MAX_VALUE` elements (#4383)

### Robust model serialization with JSON (#4632, #4708, #4739, #4868, #4936, #4945, #4974, #5086, #5087, #5089, #5091, #5094, #5110, #5111, #5112, #5120, #5137, #5218, #5222, #5236, #5245, #5248, #5281)
* In this release, we introduce experimental support for using [JSON](https://www.json.org/json-en.html) for serializing (saving/loading) XGBoost models and related hyperparameters for training. We would like to eventually replace the old binary format with JSON, since it is an open format and parsers are available in many programming languages and platforms. See [the documentation for model I/O using JSON](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/saving_model.html). #3980 explains why JSON was chosen over other alternatives.
* To maximize interoperability and compatibility of the serialized models, we now split serialization into two parts (#4855):
  1. Model, e.g. decision trees and strictly related metadata like `num_features`.
  2. Internal configuration, consisting of training parameters and other configurable parameters. For example, `max_delta_step`, `tree_method`, `objective`, `predictor`, `gpu_id`.

  Previously, users often ran into issues where the model file produced by one machine could not load or run on another machine. For example, models trained using a machine with an NVIDIA GPU could not run on another machine without a GPU (#5291, #5234). The reason is that the old binary format saved some internal configuration that was not universally applicable to all machines, e.g. `predictor='gpu_predictor'`.

  Now, the model saving function (`Booster.save_model()` in Python) will save only the model, without internal configuration. This guarantees that your model file can be used anywhere. Internal configuration will be serialized in limited circumstances such as:
  * Multiple nodes in a distributed system exchanging model details over the network.
  * Model checkpointing, to recover from possible crashes.

  This work proved to be useful for parameter validation as well (see below).
* Starting with the 1.0.0 release, we will use semantic versioning to indicate whether the model produced by one version of XGBoost would be compatible with another version of XGBoost. Any change in the major version indicates a breaking change in the serialization format.
* We now provide a robust method to save and load scikit-learn related attributes (#5245). Previously, we used Python pickle to save Python attributes related to `XGBClassifier`, `XGBRegressor`, and `XGBRanker` objects. The attributes are necessary to properly interact with scikit-learn. See #4639 for more details. The use of pickling hampered interoperability, as a pickle from one machine may not necessarily work on another machine. Starting with this release, we use an alternative method to serialize the scikit-learn related attributes. The use of Python pickle is now discouraged (#5236, #5281).
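A minimal sketch of the JSON round-trip in Python (random data for illustration):

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.rand(100, 5), label=rng.rand(100))
bst = xgb.train({"max_depth": 3}, dtrain, num_boost_round=10)

bst.save_model("model.json")     # the .json extension selects the JSON format
loaded = xgb.Booster()
loaded.load_model("model.json")  # model only; no machine-specific configuration
```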
### Parameter validation: detection of unused or incorrect parameters (#4553, #4577, #4738, #4801, #4961, #5101, #5157, #5167, #5256)
* A mis-spelled training parameter is a common user mistake. In previous versions of XGBoost, mis-spelled parameters were silently ignored. Starting with the 1.0.0 release, XGBoost will produce a warning message if there are any unused training parameters. Currently, parameter validation is available to R users and Python XGBoost API users. We are working to extend its support to scikit-learn users.
* Configuration steps now have well-defined semantics (#4542, #4738), so we know exactly where and how the internal configurable parameters are changed.
* The user can now use the `save_config()` function to inspect all (used) training parameters. This is helpful for debugging model performance.
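For example (the misspelling below is deliberate):

```python
import json

import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.rand(100, 5), label=rng.rand(100))
# 'max_dept' is misspelled on purpose; 1.0 warns that the parameter
# might not be used instead of silently ignoring it.
bst = xgb.train({"max_dept": 3}, dtrain, num_boost_round=5)
config = json.loads(bst.save_config())  # every parameter actually in effect
```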
### Allow individual workers to recover from faults (#4808, #4966)
* Status quo: if a worker fails, all workers are shut down and restarted, and learning resumes from the last checkpoint. This involves requesting resources from the scheduler (e.g. Spark) and shuffling all the data again from scratch. Both of these operations can be quite costly and block training for extended periods of time, especially if the training data is big and the number of worker nodes is in the hundreds.
* The proposed solution is to recover the single node that failed, instead of shutting down all workers. The rest of the cluster waits until the single failed worker is bootstrapped and catches up with the rest.
* See the roadmap at #4753. Note that this is work in progress. In particular, the feature is not yet available from XGBoost4J-Spark.

### Accurate prediction for DART models
* Use DART tree weights when computing SHAPs (#5050)
* Don't drop trees during DART prediction by default (#5115)
* Fix DART prediction in R (#5204)

### Make external memory more robust
* Fix issues with training with external memory on CPU (#4487)
* Fix crash with approx tree method on CPU (#4510)
* Fix external memory race in `exact` (#4980). Note: `dmlc::ThreadedIter` is not actually thread-safe. We would like to re-design it in the long term.

### Major refactoring of the `DMatrix` class (#4686, #4744, #4748, #5044, #5092, #5108, #5188, #5198)
* Goal 1: improve performance and reduce memory consumption. Right now, if the user trains a model with a NumPy array as training data, the array gets copied 2-3 times before training begins. We'd like to reduce duplication of the data matrix.
* Goal 2: Expose a common interface to external data, unify the way DMatrix objects are constructed and simplify the process of adding new external data sources. This work is essential for ingesting cuPy arrays.
* Goal 3: Handle missing values consistently.
* RFC: #4354, Roadmap: #5143
* This work is also relevant to external memory support on GPUs.

### Breaking: XGBoost Python package now requires Python 3.5 or newer (#5021, #5274)
* Python 3.4 reached its end-of-life on March 16, 2019, so we now require Python 3.5 or newer.

### Breaking: GPU algorithm now requires CUDA 9.0 and higher (#4527, #4580)

### Breaking: `n_gpus` parameter removed; multi-GPU training now requires a distributed framework (#4579, #4749, #4773, #4810, #4867, #4908)
* #4531 proposed removing support for single-process multi-GPU training. Contributors would focus on multi-GPU support through distributed frameworks such as Dask and Spark, where the framework would be expected to assign a worker process for each GPU independently. By delegating GPU management and data movement to the distributed framework, we can greatly simplify the core XGBoost codebase, make multi-GPU training more robust, and reduce the burden for future development.

### Breaking: Some deprecated features have been removed
* ``gpu_exact`` training method (#4527, #4742, #4777). Use ``gpu_hist`` instead.
* ``learning_rates`` parameter in Python (#5155). Use the callback API instead.
* ``num_roots`` (#5059, #5165), since the current training code always uses a single root node.
* GPU-specific objectives (#4690), such as `gpu:reg:linear`. Use objectives without the `gpu:` prefix; the GPU will be used automatically if your machine has one.

### Breaking: the C API function `XGBoosterPredict()` now asks for an extra parameter `training`.

### Breaking: We now use CMake exclusively to build XGBoost. `Makefile` is being sunset.
* Exception: the R package uses Autotools, as the CRAN ecosystem has not yet adopted CMake widely.

### Performance improvements
* Smarter choice of histogram construction for distributed `gpu_hist` (#4519)
* Optimizations for quantization on device (#4572)
* Introduce caching memory allocator to avoid latency associated with GPU memory allocation (#4554, #4615)
* Optimize the initialization stage of the CPU `hist` algorithm for sparse datasets (#4625)
* Prevent unnecessary data copies from GPU memory to the host (#4795)
* Improve operation efficiency for single prediction (#5016)
* Group builder modified for incremental building, to speed up building large `DMatrix` (#5098)

### Bug-fixes
* Eliminate `FutureWarning: Series.base is deprecated` (#4337)
* Ensure pandas DataFrame column names are treated as strings in type error message (#4481)
* [jvm-packages] Add back `reg:linear` for scala, as it is only deprecated and not meant to be removed yet (#4490)
* Fix library loading for Cygwin users (#4499)
* Fix prediction from loaded pickle (#4516)
* Enforce exclusion between `pred_interactions=True` and `pred_contribs=True` (#4522)
* Do not return dangling reference to local `std::string` (#4543)
* Set the appropriate device before freeing device memory (#4566)
* Mark `SparsePageDmatrix` destructor default. (#4568)
* Choose the appropriate tree method only when the tree method is 'auto' (#4571)
* Fix `benchmark_tree.py` (#4593)
* [jvm-packages] Fix silly bug in feature scoring (#4604)
* Fix GPU predictor when the test data matrix has a different number of features than the training data matrix used to train the model (#4613)
* Fix external memory for get column batches. (#4622)
* [R] Use built-in label when xgb.DMatrix is given to xgb.cv() (#4631)
* Fix early stopping in the Python package (#4638)
* Fix AUC error in distributed mode caused by imbalanced dataset (#4645, #4798)
* [jvm-packages] Expose `setMissing` method in `XGBoostClassificationModel` / `XGBoostRegressionModel` (#4643)
* Remove initializing stringstream reference. (#4788)
* [R] `xgb.get.handle` now checks all classes listed of `object` (#4800)
* Do not use `gpu_predictor` unless data comes from GPU (#4836)
* Fix data loading (#4862)
* Workaround `isnan` across different environments. (#4883)
* [jvm-packages] Handle Long-type parameter (#4885)
* Don't `set_params` at the end of `set_state` (#4947). Ensure that the model does not change after pickling and unpickling multiple times.
* C++ exceptions should not crash OpenMP loops (#4960)
* Fix `usegpu` flag in DART. (#4984)
* Run training with empty `DMatrix` (#4990, #5159)
* Ensure that no two processes can use the same GPU (#4990)
* Fix repeated split and 0 cover nodes (#5010)
* Reset histogram hit counter between multiple data batches (#5035)
* Fix `feature_name` created from Int64Index dataframe. (#5081)
* Don't use 0 for "fresh leaf" (#5084)
* Throw error when user attempts to use multi-GPU training and XGBoost has not been compiled with NCCL (#5170)
* Fix metric name loading (#5122)
* Quick fix for memory leak in CPU `hist` algorithm (#5153)
* Fix wrapping GPU ID and prevent data copying (#5160)
* Fix signature of Span constructor (#5166)
* Lazy initialization of device vector, so that XGBoost compiled with CUDA can run on a machine without any GPU (#5173)
* Model loading should not change system locale (#5314)
* Distributed training jobs would sometimes hang; revert Rabit to fix this regression (dmlc/rabit#132, #5237)

### API changes
* Add support for cross-validation using query ID (#4474)
* Enable feature importance property for DART model (#4525)
* Add `rmsle` metric and `reg:squaredlogerror` objective (#4541)
* All objective and evaluation metrics are now exposed to JVM packages (#4560)
* `dump_model()` and `get_dump()` now support exporting in GraphViz language (#4602); see the sketch after this list.
* Support metrics `ndcg-` and `map-` (#4635)
* [jvm-packages] Allow chaining prediction (transform) in XGBoost4J-Spark (#4667)
* [jvm-packages] Add option to bypass missing value check in the Spark layer (#4805). Only use this option if you know what you are doing.
* [jvm-packages] Add public group getter (#4838)
* `XGDMatrixSetGroup` C API is now deprecated (#4864). Use `XGDMatrixSetUIntInfo` instead.
* [R] Added new `train_folds` parameter to `xgb.cv()` (#5114)
* Ingest meta information from Pandas DataFrame, such as data weights (#5216)
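A hedged sketch of the GraphViz export mentioned above (tiny random model for illustration):

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.rand(100, 4), label=rng.rand(100))
bst = xgb.train({"max_depth": 3}, dtrain, num_boost_round=3)

dot_sources = bst.get_dump(dump_format="dot")  # one GraphViz source string per tree
print(dot_sources[0])
```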
### Maintenance: Refactor code for legibility and maintainability
|
||||
* De-duplicate GPU parameters (#4454)
|
||||
* Simplify INI-style config reader using C++11 STL (#4478, #4521)
|
||||
* Refactor histogram building code for `gpu_hist` (#4528)
|
||||
* Overload device memory allocator, to enable instrumentation for compiling memory usage statistics (#4532)
|
||||
* Refactor out row partitioning logic from `gpu_hist` (#4554)
|
||||
* Remove an unused variable (#4588)
|
||||
* Implement tree model dump with code generator, to de-duplicate code for generating dumps in 3 different formats (#4602)
|
||||
* Remove `RowSet` class which is no longer being used (#4697)
|
||||
* Remove some unused functions as reported by cppcheck (#4743)
|
||||
* Mimic CUDA assert output in Span check (#4762)
|
||||
* [jvm-packages] Refactor `XGBoost.scala` to put all params processing in one place (#4815)
|
||||
* Add some comments for GPU row partitioner (#4832)
|
||||
* Span: use `size_t' for index_type, add `front' and `back'. (#4935)
|
||||
* Remove dead code in `exact` algorithm (#5034, #5105)
|
||||
* Unify integer types used for row and column indices (#5034)
|
||||
* Extract feature interaction constraint from `SplitEvaluator` class. (#5034)
|
||||
* [Breaking] De-duplicate paramters and docstrings in the constructors of Scikit-Learn models (#5130)
|
||||
* Remove benchmark code from GPU tests (#5141)
|
||||
* Clean up Python 2 compatibility code. (#5161)
|
||||
* Extensible binary serialization format for `DMatrix::MetaInfo` (#5187). This will be useful for implementing censored labels for survival analysis applications.
|
||||
* Cleanup clang-tidy warnings. (#5247)
|
||||
|
||||
### Maintenance: testing, continuous integration, build system
|
||||
* Use `yaml.safe_load` instead of `yaml.load`. (#4537)
|
||||
* Ensure GCC is at least 5.x (#4538)
|
||||
* Remove all mention of `reg:linear` from tests (#4544)
|
||||
* [jvm-packages] Upgrade to Scala 2.12 (#4574)
|
||||
* [jvm-packages] Update kryo dependency to 2.22 (#4575)
|
||||
* [CI] Specify account ID when logging into ECR Docker registry (#4584)
|
||||
* Use Sphinx 2.1+ to compile documentation (#4609)
|
||||
* Make Pandas optional for running Python unit tests (#4620)
|
||||
* Fix spark tests on machines with many cores (#4634)
|
||||
* [jvm-packages] Update local dev build process (#4640)
|
||||
* Add optional dependencies to setup.py (#4655)
|
||||
* [jvm-packages] Fix maven warnings (#4664)
|
||||
* Remove extraneous files from the R package, to comply with CRAN policy (#4699)
|
||||
* Remove VC-2013 support, since it is not C++11 compliant (#4701)
|
||||
* [CI] Fix broken installation of Pandas (#4704, #4722)
|
||||
* [jvm-packages] Clean up temporary files afer running tests (#4706)
|
||||
* Specify version macro in CMake. (#4730)
|
||||
* Include dmlc-tracker into XGBoost Python package (#4731)
|
||||
* [CI] Use long key ID for Ubuntu repository fingerprints. (#4783)
|
||||
* Remove plugin, cuda related code in automake & autoconf files (#4789)
|
||||
* Skip related tests when scikit-learn is not installed. (#4791)
|
||||
* Ignore vscode and clion files (#4866)
|
||||
* Use bundled Google Test by default (#4900)
|
||||
* [CI] Raise timeout threshold in Jenkins (#4938)
|
||||
* Copy CMake parameter from dmlc-core. (#4948)
|
||||
* Set correct file permission. (#4964)
|
||||
* [CI] Update lint configuration to support latest pylint convention (#4971)
|
||||
* [CI] Upload nightly builds to S3 (#4976, #4979)
|
||||
* Add asan.so.5 to cmake script. (#4999)
|
||||
* [CI] Fix Travis tests. (#5062)
|
||||
* [CI] Locate vcomp140.dll from System32 directory (#5078)
|
||||
* Implement training observer to dump internal states of objects (#5088). This will be useful for debugging.
|
||||
* Fix visual studio output library directories (#5119)
|
||||
* [jvm-packages] Comply with scala style convention + fix broken unit test (#5134)
|
||||
* [CI] Repair download URL for Maven 3.6.1 (#5139)
|
||||
* Don't use modernize-use-trailing-return-type in clang-tidy. (#5169)
|
||||
* Explicitly use UTF-8 codepage when using MSVC (#5197)
|
||||
* Add CMake option to run Undefined Behavior Sanitizer (UBSan) (#5211)
|
||||
* Make some GPU tests deterministic (#5229)
|
||||
* [R] Robust endian detection in CRAN xgboost build (#5232)
|
||||
* Support FreeBSD (#5233)
|
||||
* Make `pip install xgboost*.tar.gz` work by fixing build-python.sh (#5241)
|
||||
* Fix compilation error due to 64-bit integer narrowing to `size_t` (#5250)
|
||||
* Remove use of `std::cout` from R package, to comply with CRAN policy (#5261)
|
||||
* Update DMLC-Core submodule (#4674, #4688, #4726, #4924)
|
||||
* Update Rabit submodule (#4560, #4667, #4718, #4808, #4966, #5237)
|
||||
|
||||
### Usability Improvements, Documentation

* Add Random Forest API to Python API doc (#4500)
* Fix Python demo and doc. (#4545)
* Remove doc about not supporting CUDA 10.1 (#4578)
* Address some Sphinx warnings and errors, add doc for building doc. (#4589)
* Add instruction to run formatting checks locally (#4591)
* Fix docstring for `XGBModel.predict()` (#4592)
* Doc and demo for customized metric and objective (#4598, #4608)
* Add to documentation how to run tests locally (#4610)
* Empty evaluation list in early stopping should produce meaningful error message (#4633)
* Fixed year to 2019 in conf.py, helpers.h and LICENSE (#4661)
* Minor updates to links and grammar (#4673)
* Remove `silent` in doc (#4689)
* Remove old Python troubleshooting doc (#4729)
* Add `os.PathLike` support for file paths to DMatrix and Booster Python classes (#4757)
* Update XGBoost4J-Spark doc (#4804)
* Regular formatting for evaluation metrics (#4803)
* [jvm-packages] Refine documentation for handling missing values in XGBoost4J-Spark (#4805)
* Monitor for distributed environment (#4829). This is useful for identifying performance bottlenecks.
* Add check for length of weights and produce a good error message (#4872)
* Fix DMatrix doc (#4884)
* Export C++ headers in CMake installation (#4897)
* Update license year in README.md to 2019 (#4940)
* Fix incorrectly displayed Note in the doc (#4943)
* Follow PEP 257 Docstring Conventions (#4959)
* Document minimum version required for Google Test (#5001)
* Add better error message for invalid feature names (#5024)
* Some guidelines on device memory usage (#5038)
* [doc] Some notes for external memory. (#5065)
* Update document for `tree_method` (#5106)
* Update demo for ranking. (#5154)
* Add new lines for Spark XGBoost missing values section (#5180)
* Fix simple typo: utilty -> utility (#5182)
* Update R doc by roxygen2 (#5201)
* [R] Direct user to use `set.seed()` instead of setting `seed` parameter (#5125)
* Add Optuna badge to `README.md` (#5208)
* Fix compilation error in `c-api-demo.c` (#5215)
### Acknowledgement

**Contributors**: Nan Zhu (@CodingCat), Crissman Loomis (@Crissman), Cyprien Ricque (@Cyprien-Ricque), Evan Kepner (@EvanKepner), K.O. (@Hi-king), KaiJin Ji (@KerryJi), Peter Badida (@KeyWeeUsr), Kodi Arfer (@Kodiologist), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Jacob Kim (@TheJacobKim), Vibhu Jawa (@VibhuJawa), Marcos (@astrowonk), Andy Adinets (@canonizer), Chen Qin (@chenqin), Christopher Cowden (@cowden), @cpfarrell, @david-cortes, Liangcai Li (@firestarman), @fuhaoda, Philip Hyunsu Cho (@hcho3), @here-nagini, Tong He (@hetong007), Michal Kurka (@michalkurka), Honza Sterba (@honzasterba), @iblumin, @koertkuipers, mattn (@mattn), Mingjie Tang (@merlintang), OrdoAbChao (@mglowacki100), Matthew Jones (@mt-jones), mitama (@nigimitama), Nathan Moore (@nmoorenz), Daniel Stahl (@phillyfan1138), Michaël Benesty (@pommedeterresautee), Rong Ou (@rongou), Sebastian (@sfahnens), Xu Xiao (@sperlingxx), @sriramch, Sean Owen (@srowen), Stephanie Yang (@stpyang), Yuan Tang (@terrytangyuan), Mathew Wicks (@thesuperzapper), Tim Gates (@timgates42), TinkleG (@tinkle1129), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Matvey Turkov (@turk0v), Bobby Wang (@wbo4958), yage (@yage99), @yellowdolphin

**Reviewers**: Nan Zhu (@CodingCat), Crissman Loomis (@Crissman), Cyprien Ricque (@Cyprien-Ricque), Evan Kepner (@EvanKepner), John Zedlewski (@JohnZed), KOLANICH (@KOLANICH), KaiJin Ji (@KerryJi), Kodi Arfer (@Kodiologist), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Nikita Titov (@StrikerRUS), Jacob Kim (@TheJacobKim), Vibhu Jawa (@VibhuJawa), Andrew Kane (@ankane), Arno Candel (@arnocandel), Marcos (@astrowonk), Bryan Woods (@bryan-woods), Andy Adinets (@canonizer), Chen Qin (@chenqin), Thomas Franke (@coding-komek), Peter (@codingforfun), @cpfarrell, Joshua Patterson (@datametrician), @fuhaoda, Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), Honza Sterba (@honzasterba), @iblumin, @jakirkham, Vadim Khotilovich (@khotilov), Keith Kraus (@kkraus14), @koertkuipers, @melonki, Mingjie Tang (@merlintang), OrdoAbChao (@mglowacki100), Daniel Mahler (@mhlr), Matthew Rocklin (@mrocklin), Matthew Jones (@mt-jones), Michaël Benesty (@pommedeterresautee), PSEUDOTENSOR / Jonathan McKinney (@pseudotensor), Rong Ou (@rongou), Vladimir (@sh1ng), Scott Lundberg (@slundberg), Xu Xiao (@sperlingxx), @sriramch, Pasha Stetsenko (@st-pasha), Stephanie Yang (@stpyang), Yuan Tang (@terrytangyuan), Mathew Wicks (@thesuperzapper), Theodore Vasiloudis (@thvasilo), TinkleG (@tinkle1129), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Bobby Wang (@wbo4958), yage (@yage99), @yellowdolphin, Yin Lou (@yinlou)
## v0.90 (2019.05.18)

### XGBoost Python package drops Python 2.x (#4379, #4381)
@@ -32,3 +32,7 @@ set_target_properties(
 set(XGBOOST_DEFINITIONS "${XGBOOST_DEFINITIONS};${R_DEFINITIONS}" PARENT_SCOPE)
 set(XGBOOST_OBJ_SOURCES $<TARGET_OBJECTS:xgboost-r> PARENT_SCOPE)
 set(LINKED_LIBRARIES_PRIVATE ${LINKED_LIBRARIES_PRIVATE} ${LIBR_CORE_LIBRARY} PARENT_SCOPE)
+
+if (USE_OPENMP)
+  target_link_libraries(xgboost-r PRIVATE OpenMP::OpenMP_CXX)
+endif ()
@@ -1,8 +1,8 @@
 Package: xgboost
 Type: Package
 Title: Extreme Gradient Boosting
-Version: 1.0.0.1
-Date: 2019-07-23
+Version: 1.1.0.1
+Date: 2020-02-21
 Authors@R: c(
   person("Tianqi", "Chen", role = c("aut"),
          email = "tianqi.tchen@gmail.com"),

@@ -63,5 +63,5 @@ Imports:
     data.table (>= 1.9.6),
     magrittr (>= 1.5),
     stringi (>= 0.5.2)
-RoxygenNote: 7.0.2
+RoxygenNote: 7.1.0
 SystemRequirements: GNU make, C++11
@@ -14,6 +14,7 @@ S3method(setinfo,xgb.DMatrix)
 S3method(slice,xgb.DMatrix)
 export("xgb.attr<-")
 export("xgb.attributes<-")
+export("xgb.config<-")
 export("xgb.parameters<-")
 export(cb.cv.predict)
 export(cb.early.stop)

@@ -30,6 +31,7 @@ export(xgb.DMatrix)
 export(xgb.DMatrix.save)
 export(xgb.attr)
 export(xgb.attributes)
+export(xgb.config)
 export(xgb.create.features)
 export(xgb.cv)
 export(xgb.dump)

@@ -38,6 +40,7 @@ export(xgb.ggplot.deepness)
 export(xgb.ggplot.importance)
 export(xgb.importance)
 export(xgb.load)
+export(xgb.load.raw)
 export(xgb.model.dt.tree)
 export(xgb.plot.deepness)
 export(xgb.plot.importance)

@@ -46,7 +49,9 @@ export(xgb.plot.shap)
 export(xgb.plot.tree)
 export(xgb.save)
 export(xgb.save.raw)
+export(xgb.serialize)
 export(xgb.train)
+export(xgb.unserialize)
 export(xgboost)
 import(methods)
 importClassesFrom(Matrix,dgCMatrix)
@@ -28,7 +28,7 @@ NVL <- function(x, val) {
 # Merges booster params with whatever is provided in ...
 # plus runs some checks
 check.booster.params <- function(params, ...) {
-  if (typeof(params) != "list")
+  if (!identical(class(params), "list"))
     stop("params must be a list")
 
   # in R interface, allow for '.' instead of '_' in parameter names

@@ -78,7 +78,7 @@ check.booster.params <- function(params, ...) {
   if (!is.null(params[['interaction_constraints']]) &&
       typeof(params[['interaction_constraints']]) != "character"){
     # check input class
-    if (class(params[['interaction_constraints']]) != 'list') stop('interaction_constraints should be class list')
+    if (!identical(class(params[['interaction_constraints']]),'list')) stop('interaction_constraints should be class list')
     if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric','integer'))) {
       stop('interaction_constraints should be a list of numeric/integer vectors')
     }

@@ -145,7 +145,7 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
   if (is.null(obj)) {
     .Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
   } else {
-    pred <- predict(booster_handle, dtrain, training = TRUE)
+    pred <- predict(booster_handle, dtrain, outputmargin = TRUE, training = TRUE)
     gpair <- obj(pred, dtrain)
     .Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
   }
@@ -5,20 +5,34 @@ xgb.Booster.handle <- function(params = list(), cachelist = list(), modelfile =
       !all(vapply(cachelist, inherits, logical(1), what = 'xgb.DMatrix'))) {
     stop("cachelist must be a list of xgb.DMatrix objects")
   }
-
-  handle <- .Call(XGBoosterCreate_R, cachelist)
+  ## Load existing model, dispatch for on disk model file and in memory buffer
   if (!is.null(modelfile)) {
     if (typeof(modelfile) == "character") {
+      ## A filename
+      handle <- .Call(XGBoosterCreate_R, cachelist)
       .Call(XGBoosterLoadModel_R, handle, modelfile[1])
+      class(handle) <- "xgb.Booster.handle"
+      if (length(params) > 0) {
+        xgb.parameters(handle) <- params
+      }
+      return(handle)
     } else if (typeof(modelfile) == "raw") {
-      .Call(XGBoosterLoadModelFromRaw_R, handle, modelfile)
+      ## A memory buffer
+      bst <- xgb.unserialize(modelfile)
+      xgb.parameters(bst) <- params
+      return (bst)
     } else if (inherits(modelfile, "xgb.Booster")) {
+      ## A booster object
       bst <- xgb.Booster.complete(modelfile, saveraw = TRUE)
-      .Call(XGBoosterLoadModelFromRaw_R, handle, bst$raw)
+      bst <- xgb.unserialize(bst$raw)
+      xgb.parameters(bst) <- params
+      return (bst)
     } else {
       stop("modelfile must be either character filename, or raw booster dump, or xgb.Booster object")
     }
   }
+  ## Create new model
+  handle <- .Call(XGBoosterCreate_R, cachelist)
   class(handle) <- "xgb.Booster.handle"
   if (length(params) > 0) {
     xgb.parameters(handle) <- params

@@ -113,9 +127,29 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
   if (is.null.handle(object$handle)) {
     object$handle <- xgb.Booster.handle(modelfile = object$raw)
   } else {
-    if (is.null(object$raw) && saveraw)
-      object$raw <- xgb.save.raw(object$handle)
+    if (is.null(object$raw) && saveraw) {
+      object$raw <- xgb.serialize(object$handle)
+    }
   }
+
+  attrs <- xgb.attributes(object)
+  if (!is.null(attrs$best_ntreelimit)) {
+    object$best_ntreelimit <- as.integer(attrs$best_ntreelimit)
+  }
+  if (!is.null(attrs$best_iteration)) {
+    ## Convert from 0 based back to 1 based.
+    object$best_iteration <- as.integer(attrs$best_iteration) + 1
+  }
+  if (!is.null(attrs$best_score)) {
+    object$best_score <- as.numeric(attrs$best_score)
+  }
+  if (!is.null(attrs$best_msg)) {
+    object$best_msg <- attrs$best_msg
+  }
+  if (!is.null(attrs$niter)) {
+    object$niter <- as.integer(attrs$niter)
+  }
+
   return(object)
 }
@@ -399,7 +433,7 @@ predict.xgb.Booster.handle <- function(object, ...) {
 #' That would only matter if attributes need to be set many times.
 #' Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
 #' the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
-#' and it would be user's responsibility to call \code{xgb.save.raw} to update it.
+#' and it would be user's responsibility to call \code{xgb.serialize} to update it.
 #'
 #' The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
 #' but it doesn't delete the other existing attributes.
@@ -458,7 +492,7 @@ xgb.attr <- function(object, name) {
   }
   .Call(XGBoosterSetAttr_R, handle, as.character(name[1]), value)
   if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
-    object$raw <- xgb.save.raw(object$handle)
+    object$raw <- xgb.serialize(object$handle)
   }
   object
 }
@@ -498,11 +532,41 @@ xgb.attributes <- function(object) {
     .Call(XGBoosterSetAttr_R, handle, names(a[i]), a[[i]])
   }
   if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
-    object$raw <- xgb.save.raw(object$handle)
+    object$raw <- xgb.serialize(object$handle)
   }
   object
 }
 
+#' Accessors for model parameters as JSON string.
+#'
+#' @param object Object of class \code{xgb.Booster}
+#' @param value A JSON string.
+#'
+#' @examples
+#' data(agaricus.train, package='xgboost')
+#' train <- agaricus.train
+#'
+#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
+#' config <- xgb.config(bst)
+#'
+#' @rdname xgb.config
+#' @export
+xgb.config <- function(object) {
+  handle <- xgb.get.handle(object)
+  .Call(XGBoosterSaveJsonConfig_R, handle);
+}
+
+#' @rdname xgb.config
+#' @export
+`xgb.config<-` <- function(object, value) {
+  handle <- xgb.get.handle(object)
+  .Call(XGBoosterLoadJsonConfig_R, handle, value)
+  object$raw <- NULL # force renew the raw buffer
+  object <- xgb.Booster.complete(object)
+  object
+}
+
 #' Accessors for model parameters.
 #'
 #' Only the setter for xgboost parameters is currently implemented.
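The accessors above expose the booster's internal parameters as a single JSON string. As a minimal sketch of the intended round trip (it mirrors the `Configuration works` test added later in this changeset; the agaricus data and parameter values are illustrative only):

```r
library(xgboost)
data(agaricus.train, package = 'xgboost')
train <- agaricus.train

# Train a small booster, then snapshot and restore its JSON parameter config.
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
config <- xgb.config(bst)   # JSON string of internal parameters
xgb.config(bst) <- config   # loading the same string back is a no-op round trip
stopifnot(identical(config, xgb.config(bst)))
```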
@@ -539,7 +603,7 @@ xgb.attributes <- function(object) {
     .Call(XGBoosterSetParam_R, handle, names(p[i]), p[[i]])
   }
   if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
-    object$raw <- xgb.save.raw(object$handle)
+    object$raw <- xgb.serialize(object$handle)
   }
   object
 }
@@ -188,9 +188,10 @@ getinfo <- function(object, ...) UseMethod("getinfo")
 getinfo.xgb.DMatrix <- function(object, name, ...) {
   if (typeof(name) != "character" ||
       length(name) != 1 ||
-      !name %in% c('label', 'weight', 'base_margin', 'nrow')) {
+      !name %in% c('label', 'weight', 'base_margin', 'nrow',
+                   'label_lower_bound', 'label_upper_bound')) {
     stop("getinfo: name must be one of the following\n",
-         "    'label', 'weight', 'base_margin', 'nrow'")
+         "    'label', 'weight', 'base_margin', 'nrow', 'label_lower_bound', 'label_upper_bound'")
   }
   if (name != "nrow"){
     ret <- .Call(XGDMatrixGetInfo_R, object, name)

@@ -243,6 +244,18 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
     .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
     return(TRUE)
   }
+  if (name == "label_lower_bound") {
+    if (length(info) != nrow(object))
+      stop("The length of lower-bound labels must equal to the number of rows in the input data")
+    .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
+    return(TRUE)
+  }
+  if (name == "label_upper_bound") {
+    if (length(info) != nrow(object))
+      stop("The length of upper-bound labels must equal to the number of rows in the input data")
+    .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
+    return(TRUE)
+  }
   if (name == "weight") {
     if (length(info) != nrow(object))
       stop("The length of weights must equal to the number of rows in the input data")
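The two new `label_lower_bound`/`label_upper_bound` fields carry the censored labels used by the survival (AFT) additions elsewhere in this changeset. A hedged sketch of the API extended above, with made-up data (the matrix and bounds below are illustrative only):

```r
library(xgboost)
set.seed(1)
x <- matrix(rnorm(100 * 4), nrow = 100)
lower <- rexp(100)          # lower bound of each (possibly censored) label
upper <- lower + rexp(100)  # real data would use Inf for right-censored rows

dtrain <- xgb.DMatrix(x)
setinfo(dtrain, 'label_lower_bound', lower)
setinfo(dtrain, 'label_upper_bound', upper)
stopifnot(all(getinfo(dtrain, 'label_lower_bound') == lower))
```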
@@ -101,7 +101,7 @@
 #'        (only available with early stopping).
 #' \item \code{pred} CV prediction values available when \code{prediction} is set.
 #'       It is either vector or matrix (see \code{\link{cb.cv.predict}}).
-#' \item \code{models} a liost of the CV folds' models. It is only available with the explicit
+#' \item \code{models} a list of the CV folds' models. It is only available with the explicit
 #'       setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
 #' }
 #'
@@ -1,30 +1,30 @@
 #' Load xgboost model from binary file
 #'
 #' Load xgboost model from the binary model file.
 #'
 #' @param modelfile the name of the binary input file.
 #'
 #' @details
 #' The input file is expected to contain a model saved in an xgboost-internal binary format
 #' using either \code{\link{xgb.save}} or \code{\link{cb.save.model}} in R, or using some
 #' appropriate methods from other xgboost interfaces. E.g., a model trained in Python and
 #' saved from there in xgboost format, could be loaded from R.
 #'
 #' Note: a model saved as an R-object, has to be loaded using corresponding R-methods,
 #' not \code{xgb.load}.
 #'
 #' @return
 #' An object of \code{xgb.Booster} class.
 #'
 #' @seealso
 #' \code{\link{xgb.save}}, \code{\link{xgb.Booster.complete}}.
 #'
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' data(agaricus.test, package='xgboost')
 #' train <- agaricus.train
 #' test <- agaricus.test
 #' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
 #'                eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
 #' xgb.save(bst, 'xgb.model')
 #' bst <- xgb.load('xgb.model')
R-package/R/xgb.load.raw.R (new file)
@@ -0,0 +1,14 @@
+#' Load serialised xgboost model from R's raw vector
+#'
+#' User can generate raw memory buffer by calling xgb.save.raw
+#'
+#' @param buffer the buffer returned by xgb.save.raw
+#'
+#' @export
+xgb.load.raw <- function(buffer) {
+  cachelist <- list()
+  handle <- .Call(XGBoosterCreate_R, cachelist)
+  .Call(XGBoosterLoadModelFromRaw_R, handle, buffer)
+  class(handle) <- "xgb.Booster.handle"
+  return (handle)
+}
@@ -1,23 +1,23 @@
 #' Save xgboost model to R's raw vector,
-#' user can call xgb.load to load the model back from raw vector
+#' user can call xgb.load.raw to load the model back from raw vector
 #'
 #' Save xgboost model from xgboost or xgb.train
 #'
 #' @param model the model object.
 #'
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' data(agaricus.test, package='xgboost')
 #' train <- agaricus.train
 #' test <- agaricus.test
 #' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
 #'                eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
 #' raw <- xgb.save.raw(bst)
-#' bst <- xgb.load(raw)
+#' bst <- xgb.load.raw(raw)
 #' pred <- predict(bst, test$data)
 #'
 #' @export
 xgb.save.raw <- function(model) {
-  model <- xgb.get.handle(model)
-  .Call(XGBoosterModelToRaw_R, model)
+  handle <- xgb.get.handle(model)
+  .Call(XGBoosterModelToRaw_R, handle)
 }
R-package/R/xgb.serialize.R (new file)
@@ -0,0 +1,21 @@
+#' Serialize the booster instance into R's raw vector. The serialization method differs
+#' from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
+#' parameters. This serialization format is not stable across different xgboost versions.
+#'
+#' @param booster the booster instance
+#'
+#' @examples
+#' data(agaricus.train, package='xgboost')
+#' data(agaricus.test, package='xgboost')
+#' train <- agaricus.train
+#' test <- agaricus.test
+#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+#'                eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
+#' raw <- xgb.serialize(bst)
+#' bst <- xgb.unserialize(raw)
+#'
+#' @export
+xgb.serialize <- function(booster) {
+  handle <- xgb.get.handle(booster)
+  .Call(XGBoosterSerializeToBuffer_R, handle)
+}
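Taken together with `xgb.save.raw`/`xgb.load.raw` above, this gives two distinct in-memory formats, with the distinction stated in the roxygen block: `xgb.serialize` snapshots the model plus its parameters in a version-specific format, while `xgb.save.raw` saves the model only. A short illustrative sketch of when each pair applies:

```r
library(xgboost)
data(agaricus.train, package = 'xgboost')
train <- agaricus.train

bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")

model_only <- xgb.save.raw(bst)    # model weights only; stable across versions
snapshot   <- xgb.serialize(bst)   # model + parameters; version-specific

bst_from_model    <- xgb.load.raw(model_only)
bst_from_snapshot <- xgb.unserialize(snapshot)
```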
@@ -267,7 +267,7 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
   }
 
   # evaluation printing callback
-  params <- c(params, list(silent = ifelse(verbose > 1, 0, 1)))
+  params <- c(params)
   print_every_n <- max( as.integer(print_every_n), 1L)
   if (!has.callbacks(callbacks, 'cb.print.evaluation') &&
       verbose) {
@@ -291,8 +291,10 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
     callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
                                                  maximize = maximize, verbose = verbose))
   }
 
   # Sort the callbacks into categories
   cb <- categorize.callbacks(callbacks)
+  params['validate_parameters'] <- TRUE
+  if (!is.null(params[['seed']])) {
+    warning("xgb.train: `seed` is ignored in R package. Use `set.seed()` instead.")
+  }
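With `validate_parameters` forced on, unknown parameter names are now surfaced instead of silently dropped, and a `seed` entry triggers an explicit warning. A hedged sketch (the misspelled parameter is deliberate and hypothetical; per the `parameter validation works` test added below, the backend reports unrecognized entries in its printed output):

```r
library(xgboost)
data(agaricus.train, package = 'xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label = train$label)

# 'max_dept' is a deliberate typo: the backend now reports unrecognized
# entries in its output rather than ignoring them silently.
params <- list(objective = "binary:logistic", max_dept = 2)
bst <- xgb.train(params = params, data = dtrain, nrounds = 1)

# A 'seed' parameter would be ignored in R; xgb.train warns and
# points the user to set.seed() instead.
```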
R-package/R/xgb.unserialize.R (new file)
@@ -0,0 +1,12 @@
+#' Load the instance back from \code{\link{xgb.serialize}}
+#'
+#' @param buffer the buffer containing booster instance saved by \code{\link{xgb.serialize}}
+#'
+#' @export
+xgb.unserialize <- function(buffer) {
+  cachelist <- list()
+  handle <- .Call(XGBoosterCreate_R, cachelist)
+  .Call(XGBoosterUnserializeFromBuffer_R, handle, buffer)
+  class(handle) <- "xgb.Booster.handle"
+  return (handle)
+}
@@ -4,8 +4,10 @@
 \name{agaricus.test}
 \alias{agaricus.test}
 \title{Test part from Mushroom Data Set}
-\format{A list containing a label vector, and a dgCMatrix object with 1611
-rows and 126 variables}
+\format{
+A list containing a label vector, and a dgCMatrix object with 1611
+rows and 126 variables
+}
 \usage{
 data(agaricus.test)
 }
@@ -4,8 +4,10 @@
 \name{agaricus.train}
 \alias{agaricus.train}
 \title{Training part from Mushroom Data Set}
-\format{A list containing a label vector, and a dgCMatrix object with 6513
-rows and 127 variables}
+\format{
+A list containing a label vector, and a dgCMatrix object with 6513
+rows and 127 variables
+}
 \usage{
 data(agaricus.train)
 }
@@ -55,7 +55,7 @@ than for \code{xgb.Booster}, since only just a handle (pointer) would need to be
 That would only matter if attributes need to be set many times.
 Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
 the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
-and it would be user's responsibility to call \code{xgb.save.raw} to update it.
+and it would be user's responsibility to call \code{xgb.serialize} to update it.
 
 The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
 but it doesn't delete the other existing attributes.
R-package/man/xgb.config.Rd (new file)
@@ -0,0 +1,28 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/xgb.Booster.R
+\name{xgb.config}
+\alias{xgb.config}
+\alias{xgb.config<-}
+\title{Accessors for model parameters as JSON string.}
+\usage{
+xgb.config(object)
+
+xgb.config(object) <- value
+}
+\arguments{
+\item{object}{Object of class \code{xgb.Booster}}
+
+\item{value}{A JSON string.}
+}
+\description{
+Accessors for model parameters as JSON string.
+}
+\examples{
+data(agaricus.train, package='xgboost')
+train <- agaricus.train
+
+bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
+config <- xgb.config(bst)
+
+}
@@ -135,7 +135,7 @@ An object of class \code{xgb.cv.synchronous} with the following elements:
   (only available with early stopping).
 \item \code{pred} CV prediction values available when \code{prediction} is set.
       It is either vector or matrix (see \code{\link{cb.cv.predict}}).
-\item \code{models} a liost of the CV folds' models. It is only available with the explicit
+\item \code{models} a list of the CV folds' models. It is only available with the explicit
       setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
 }
 }
R-package/man/xgb.load.raw.Rd (new file)
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/xgb.load.raw.R
+\name{xgb.load.raw}
+\alias{xgb.load.raw}
+\title{Load serialised xgboost model from R's raw vector}
+\usage{
+xgb.load.raw(buffer)
+}
+\arguments{
+\item{buffer}{the buffer returned by xgb.save.raw}
+}
+\description{
+User can generate raw memory buffer by calling xgb.save.raw
+}
@@ -3,7 +3,7 @@
 \name{xgb.save.raw}
 \alias{xgb.save.raw}
 \title{Save xgboost model to R's raw vector,
-user can call xgb.load to load the model back from raw vector}
+user can call xgb.load.raw to load the model back from raw vector}
 \usage{
 xgb.save.raw(model)
 }

@@ -18,10 +18,10 @@ data(agaricus.train, package='xgboost')
 data(agaricus.test, package='xgboost')
 train <- agaricus.train
 test <- agaricus.test
 bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
                eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
 raw <- xgb.save.raw(bst)
-bst <- xgb.load(raw)
+bst <- xgb.load.raw(raw)
 pred <- predict(bst, test$data)
 
 }
R-package/man/xgb.serialize.Rd (new file)
@@ -0,0 +1,29 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/xgb.serialize.R
+\name{xgb.serialize}
+\alias{xgb.serialize}
+\title{Serialize the booster instance into R's raw vector. The serialization method differs
+from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
+parameters. This serialization format is not stable across different xgboost versions.}
+\usage{
+xgb.serialize(booster)
+}
+\arguments{
+\item{booster}{the booster instance}
+}
+\description{
+Serialize the booster instance into R's raw vector. The serialization method differs
+from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
+parameters. This serialization format is not stable across different xgboost versions.
+}
+\examples{
+data(agaricus.train, package='xgboost')
+data(agaricus.test, package='xgboost')
+train <- agaricus.train
+test <- agaricus.test
+bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+               eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
+raw <- xgb.serialize(bst)
+bst <- xgb.unserialize(raw)
+
+}
R-package/man/xgb.unserialize.Rd (new file)
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/xgb.unserialize.R
+\name{xgb.unserialize}
+\alias{xgb.unserialize}
+\title{Load the instance back from \code{\link{xgb.serialize}}}
+\usage{
+xgb.unserialize(buffer)
+}
+\arguments{
+\item{buffer}{the buffer containing booster instance saved by \code{\link{xgb.serialize}}}
+}
+\description{
+Load the instance back from \code{\link{xgb.serialize}}
+}
@@ -23,6 +23,10 @@ extern SEXP XGBoosterGetAttrNames_R(SEXP);
 extern SEXP XGBoosterGetAttr_R(SEXP, SEXP);
 extern SEXP XGBoosterLoadModelFromRaw_R(SEXP, SEXP);
 extern SEXP XGBoosterLoadModel_R(SEXP, SEXP);
+extern SEXP XGBoosterSaveJsonConfig_R(SEXP handle);
+extern SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value);
+extern SEXP XGBoosterSerializeToBuffer_R(SEXP handle);
+extern SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw);
 extern SEXP XGBoosterModelToRaw_R(SEXP);
 extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP, SEXP);
 extern SEXP XGBoosterSaveModel_R(SEXP, SEXP);

@@ -49,6 +53,10 @@ static const R_CallMethodDef CallEntries[] = {
   {"XGBoosterGetAttr_R",               (DL_FUNC) &XGBoosterGetAttr_R,               2},
   {"XGBoosterLoadModelFromRaw_R",      (DL_FUNC) &XGBoosterLoadModelFromRaw_R,      2},
   {"XGBoosterLoadModel_R",             (DL_FUNC) &XGBoosterLoadModel_R,             2},
+  {"XGBoosterSaveJsonConfig_R",        (DL_FUNC) &XGBoosterSaveJsonConfig_R,        1},
+  {"XGBoosterLoadJsonConfig_R",        (DL_FUNC) &XGBoosterLoadJsonConfig_R,        2},
+  {"XGBoosterSerializeToBuffer_R",     (DL_FUNC) &XGBoosterSerializeToBuffer_R,     1},
+  {"XGBoosterUnserializeFromBuffer_R", (DL_FUNC) &XGBoosterUnserializeFromBuffer_R, 2},
   {"XGBoosterModelToRaw_R",            (DL_FUNC) &XGBoosterModelToRaw_R,            1},
   {"XGBoosterPredict_R",               (DL_FUNC) &XGBoosterPredict_R,               5},
   {"XGBoosterSaveModel_R",             (DL_FUNC) &XGBoosterSaveModel_R,             2},
@@ -338,15 +338,6 @@ SEXP XGBoosterSaveModel_R(SEXP handle, SEXP fname) {
   return R_NilValue;
 }
 
-SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) {
-  R_API_BEGIN();
-  CHECK_CALL(XGBoosterLoadModelFromBuffer(R_ExternalPtrAddr(handle),
-                                          RAW(raw),
-                                          length(raw)));
-  R_API_END();
-  return R_NilValue;
-}
-
 SEXP XGBoosterModelToRaw_R(SEXP handle) {
   SEXP ret;
   R_API_BEGIN();

@@ -362,6 +353,57 @@ SEXP XGBoosterModelToRaw_R(SEXP handle) {
   return ret;
 }
 
+SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) {
+  R_API_BEGIN();
+  CHECK_CALL(XGBoosterLoadModelFromBuffer(R_ExternalPtrAddr(handle),
+                                          RAW(raw),
+                                          length(raw)));
+  R_API_END();
+  return R_NilValue;
+}
+
+SEXP XGBoosterSaveJsonConfig_R(SEXP handle) {
+  const char* ret;
+  R_API_BEGIN();
+  bst_ulong len {0};
+  CHECK_CALL(XGBoosterSaveJsonConfig(R_ExternalPtrAddr(handle),
+                                     &len,
+                                     &ret));
+  R_API_END();
+  return mkString(ret);
+}
+
+SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value) {
+  R_API_BEGIN();
+  XGBoosterLoadJsonConfig(R_ExternalPtrAddr(handle), CHAR(asChar(value)));
+  R_API_END();
+  return R_NilValue;
+}
+
+SEXP XGBoosterSerializeToBuffer_R(SEXP handle) {
+  SEXP ret;
+  R_API_BEGIN();
+  bst_ulong out_len;
+  const char *raw;
+  CHECK_CALL(XGBoosterSerializeToBuffer(R_ExternalPtrAddr(handle), &out_len, &raw));
+  ret = PROTECT(allocVector(RAWSXP, out_len));
+  if (out_len != 0) {
+    memcpy(RAW(ret), raw, out_len);
+  }
+  R_API_END();
+  UNPROTECT(1);
+  return ret;
+}
+
+SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw) {
+  R_API_BEGIN();
+  XGBoosterUnserializeFromBuffer(R_ExternalPtrAddr(handle),
+                                 RAW(raw),
+                                 length(raw));
+  R_API_END();
+  return R_NilValue;
+}
+
 SEXP XGBoosterDumpModel_R(SEXP handle, SEXP fmap, SEXP with_stats, SEXP dump_format) {
   SEXP out;
   R_API_BEGIN();
@@ -179,9 +179,39 @@ XGB_DLL SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw);
  * \brief save model into R's raw array
  * \param handle handle
  * \return raw array
  */
 XGB_DLL SEXP XGBoosterModelToRaw_R(SEXP handle);
 
+/*!
+ * \brief Save internal parameters as a JSON string
+ * \param handle handle
+ * \return JSON string
+ */
+
+XGB_DLL SEXP XGBoosterSaveJsonConfig_R(SEXP handle);
+/*!
+ * \brief Load the JSON string returnd by XGBoosterSaveJsonConfig_R
+ * \param handle handle
+ * \param value JSON string
+ * \return R_NilValue
+ */
+XGB_DLL SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value);
+
+/*!
+ * \brief Memory snapshot based serialization method. Saves everything states
+ * into buffer.
+ * \param handle handle to booster
+ */
+XGB_DLL SEXP XGBoosterSerializeToBuffer_R(SEXP handle);
+
+/*!
+ * \brief Memory snapshot based serialization method. Loads the buffer returned
+ * from `XGBoosterSerializeToBuffer'.
+ * \param handle handle to booster
+ * \return raw byte array
+ */
+XGB_DLL SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw);
+
 /*!
  * \brief dump model into a string
  * \param handle handle
@@ -35,6 +35,40 @@ test_that("train and predict binary classification", {
   expect_lt(abs(err_pred1 - err_log), 10e-6)
 })
 
+test_that("parameter validation works", {
+  p <- list(foo = "bar")
+  nrounds = 1
+  set.seed(1994)
+
+  d <- cbind(
+    x1 = rnorm(10),
+    x2 = rnorm(10),
+    x3 = rnorm(10))
+  y <- d[,"x1"] + d[,"x2"]^2 +
+    ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
+    rnorm(10)
+  dtrain <- xgb.DMatrix(data=d, info = list(label=y))
+
+  correct <- function() {
+    params <- list(max_depth = 2, booster = "dart",
+                   rate_drop = 0.5, one_drop = TRUE,
+                   objective = "reg:squarederror")
+    xgb.train(params = params, data = dtrain, nrounds = nrounds)
+  }
+  expect_silent(correct())
+  incorrect <- function() {
+    params <- list(max_depth = 2, booster = "dart",
+                   rate_drop = 0.5, one_drop = TRUE,
+                   objective = "reg:squarederror",
+                   foo = "bar", bar = "foo")
+    output <- capture.output(
+      xgb.train(params = params, data = dtrain, nrounds = nrounds))
+    print(output)
+  }
+  expect_output(incorrect(), "bar, foo")
+})
+
 
 test_that("dart prediction works", {
   nrounds = 32
   set.seed(1994)
@@ -68,7 +102,6 @@ test_that("dart prediction works", {
                          one_drop = TRUE,
                          nthread = 1,
                          tree_method= "exact",
-                         verbosity = 3,
                          objective = "reg:squarederror"
                        ),
                        data = dtrain,
@@ -219,6 +252,21 @@ test_that("training continuation works", {
   expect_equal(dim(bst2$evaluation_log), c(2, 2))
 })
 
+test_that("model serialization works", {
+  out_path <- "model_serialization"
+  dtrain <- xgb.DMatrix(train$data, label = train$label)
+  watchlist = list(train=dtrain)
+  param <- list(objective = "binary:logistic")
+  booster <- xgb.train(param, dtrain, nrounds = 4, watchlist)
+  raw <- xgb.serialize(booster)
+  saveRDS(raw, out_path)
+  raw <- readRDS(out_path)
+
+  loaded <- xgb.unserialize(raw)
+  raw_from_loaded <- xgb.serialize(loaded)
+  expect_equal(raw, raw_from_loaded)
+  file.remove(out_path)
+})
 
 test_that("xgb.cv works", {
   set.seed(11)
@@ -309,18 +357,28 @@ test_that("colsample_bytree works", {
   test_y <- as.numeric(rowSums(test_x) > 0)
   colnames(train_x) <- paste0("Feature_", sprintf("%03d", 1:100))
   colnames(test_x) <- paste0("Feature_", sprintf("%03d", 1:100))
   dtrain <- xgb.DMatrix(train_x, label = train_y)
   dtest <- xgb.DMatrix(test_x, label = test_y)
   watchlist <- list(train = dtrain, eval = dtest)
-  # Use colsample_bytree = 0.01, so that roughly one out of 100 features is
-  # chosen for each tree
-  param <- list(max_depth = 2, eta = 0, silent = 1, nthread = 2,
+  ## Use colsample_bytree = 0.01, so that roughly one out of 100 features is chosen for
+  ## each tree
+  param <- list(max_depth = 2, eta = 0, nthread = 2,
                 colsample_bytree = 0.01, objective = "binary:logistic",
                 eval_metric = "auc")
   set.seed(2)
   bst <- xgb.train(param, dtrain, nrounds = 100, watchlist, verbose = 0)
   xgb.importance(model = bst)
   # If colsample_bytree works properly, a variety of features should be used
   # in the 100 trees
   expect_gte(nrow(xgb.importance(model = bst)), 30)
 })
 
+test_that("Configuration works", {
+  bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+                 eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic",
+                 eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss")
+  config <- xgb.config(bst)
+  xgb.config(bst) <- config
+  reloaded_config <- xgb.config(bst)
+  expect_equal(config, reloaded_config);
+})
@@ -30,16 +30,16 @@ param <- list(objective = "binary:logistic", max_depth = 2, nthread = 2)
 
 
 test_that("cb.print.evaluation works as expected", {
 
   bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8)
   bst_evaluation_err <- NULL
   begin_iteration <- 1
   end_iteration <- 7
 
   f0 <- cb.print.evaluation(period=0)
   f1 <- cb.print.evaluation(period=1)
   f5 <- cb.print.evaluation(period=5)
 
   expect_false(is.null(attr(f1, 'call')))
   expect_equal(attr(f1, 'name'), 'cb.print.evaluation')
 
@@ -48,15 +48,15 @@ test_that("cb.print.evaluation works as expected", {
   expect_output(f1(), "\\[1\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
   expect_output(f5(), "\\[1\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
   expect_null(f1())
 
   iteration <- 2
   expect_output(f1(), "\\[2\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
   expect_silent(f5())
 
   iteration <- 7
   expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
   expect_output(f5(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
 
   bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2)
   expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\\+0.100000\ttest-auc:0.800000\\+0.200000")
 })
@@ -65,40 +65,40 @@ test_that("cb.evaluation.log works as expected", {
 
   bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8)
   bst_evaluation_err <- NULL
 
   evaluation_log <- list()
   f <- cb.evaluation.log()
 
   expect_false(is.null(attr(f, 'call')))
   expect_equal(attr(f, 'name'), 'cb.evaluation.log')
 
   iteration <- 1
   expect_silent(f())
   expect_equal(evaluation_log,
                list(c(iter=1, bst_evaluation)))
   iteration <- 2
   expect_silent(f())
   expect_equal(evaluation_log,
                list(c(iter=1, bst_evaluation), c(iter=2, bst_evaluation)))
   expect_silent(f(finalize = TRUE))
   expect_equal(evaluation_log,
                data.table(iter=1:2, train_auc=c(0.9,0.9), test_auc=c(0.8,0.8)))
 
   bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2)
   evaluation_log <- list()
   f <- cb.evaluation.log()
 
   iteration <- 1
   expect_silent(f())
   expect_equal(evaluation_log,
                list(c(iter=1, c(bst_evaluation, bst_evaluation_err))))
   iteration <- 2
   expect_silent(f())
   expect_equal(evaluation_log,
                list(c(iter=1, c(bst_evaluation, bst_evaluation_err)),
                     c(iter=2, c(bst_evaluation, bst_evaluation_err))))
   expect_silent(f(finalize = TRUE))
   expect_equal(evaluation_log,
                data.table(iter=1:2,
                           train_auc_mean=c(0.9,0.9), train_auc_std=c(0.1,0.1),
                           test_auc_mean=c(0.8,0.8), test_auc_std=c(0.2,0.2)))
@@ -130,18 +130,18 @@ test_that("cb.reset.parameters works as expected", {
   bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
                     callbacks = list(cb.reset.parameters(my_par)))
   expect_false(is.null(bst1$evaluation_log$train_error))
   expect_equal(bst0$evaluation_log$train_error,
               bst1$evaluation_log$train_error)
 
   # same eta but re-set via a function in the callback
   set.seed(111)
   my_par <- list(eta = function(itr, itr_end) 0.9)
   bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
                     callbacks = list(cb.reset.parameters(my_par)))
   expect_false(is.null(bst2$evaluation_log$train_error))
   expect_equal(bst0$evaluation_log$train_error,
               bst2$evaluation_log$train_error)
 
   # different eta re-set as a vector parameter in the callback
   set.seed(111)
   my_par <- list(eta = c(0.6, 0.5))

@@ -149,7 +149,7 @@ test_that("cb.reset.parameters works as expected", {
                     callbacks = list(cb.reset.parameters(my_par)))
   expect_false(is.null(bst3$evaluation_log$train_error))
   expect_false(all(bst0$evaluation_log$train_error == bst3$evaluation_log$train_error))
 
   # resetting multiple parameters at the same time runs with no error
   my_par <- list(eta = c(1., 0.5), gamma = c(1, 2), max_depth = c(4, 8))
   expect_error(
|
||||
test_that("cb.save.model works as expected", {
|
||||
files <- c('xgboost_01.model', 'xgboost_02.model', 'xgboost.model')
|
||||
for (f in files) if (file.exists(f)) file.remove(f)
|
||||
|
||||
|
||||
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0,
|
||||
save_period = 1, save_name = "xgboost_%02d.model")
|
||||
expect_true(file.exists('xgboost_01.model'))
|
||||
@@ -184,6 +184,9 @@ test_that("cb.save.model works as expected", {
|
||||
expect_equal(xgb.ntree(b1), 1)
|
||||
b2 <- xgb.load('xgboost_02.model')
|
||||
expect_equal(xgb.ntree(b2), 2)
|
||||
|
||||
xgb.config(b2) <- xgb.config(bst)
|
||||
expect_equal(xgb.config(bst), xgb.config(b2))
|
||||
expect_equal(bst$raw, b2$raw)
|
||||
|
||||
# save_period = 0 saves the last iteration's model
|
||||
@@ -191,8 +194,9 @@ test_that("cb.save.model works as expected", {
|
||||
save_period = 0)
|
||||
expect_true(file.exists('xgboost.model'))
|
||||
b2 <- xgb.load('xgboost.model')
|
||||
xgb.config(b2) <- xgb.config(bst)
|
||||
expect_equal(bst$raw, b2$raw)
|
||||
|
||||
|
||||
for (f in files) if (file.exists(f)) file.remove(f)
|
||||
})
|
||||
|
||||
@@ -211,13 +215,22 @@ test_that("early stopping xgb.train works", {
   err_pred <- err(ltest, pred)
   err_log <- bst$evaluation_log[bst$best_iteration, test_error]
   expect_equal(err_log, err_pred, tolerance = 5e-6)
 
   set.seed(11)
   expect_silent(
     bst0 <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.3,
                       early_stopping_rounds = 3, maximize = FALSE, verbose = 0)
   )
   expect_equal(bst$evaluation_log, bst0$evaluation_log)
+
+  xgb.save(bst, "model.bin")
+  loaded <- xgb.load("model.bin")
+
+  expect_false(is.null(loaded$best_iteration))
+  expect_equal(loaded$best_iteration, bst$best_ntreelimit)
+  expect_equal(loaded$best_ntreelimit, bst$best_ntreelimit)
+
+  file.remove("model.bin")
 })
 
 test_that("early stopping using a specific metric works", {
@@ -288,13 +301,13 @@ test_that("prediction in early-stopping xgb.cv works", {
                  early_stopping_rounds = 5, maximize = FALSE, stratified = FALSE,
                  prediction = TRUE)
   , "Stopping. Best iteration")
 
   expect_false(is.null(cv$best_iteration))
   expect_lt(cv$best_iteration, 19)
   expect_false(is.null(cv$evaluation_log))
   expect_false(is.null(cv$pred))
   expect_length(cv$pred, nrow(train$data))
 
   err_pred <- mean( sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))) )
   err_log <- cv$evaluation_log[cv$best_iteration, test_error_mean]
   expect_equal(err_pred, err_log, tolerance = 1e-6)
@@ -58,3 +58,20 @@ test_that("custom objective using DMatrix attr works", {
   bst <- xgb.train(param, dtrain, num_round, watchlist)
   expect_equal(class(bst), "xgb.Booster")
 })
+
+test_that("custom objective with multi-class works", {
+  data = as.matrix(iris[, -5])
+  label = as.numeric(iris$Species) - 1
+  dtrain <- xgb.DMatrix(data = data, label = label)
+  nclasses <- 3
+
+  fake_softprob <- function(preds, dtrain) {
+    expect_true(all(matrix(preds) == 0.5))
+    grad <- rnorm(dim(as.matrix(preds))[1])
+    expect_equal(dim(data)[1] * nclasses, dim(as.matrix(preds))[1])
+    hess <- rnorm(dim(as.matrix(preds))[1])
+    return (list(grad = grad, hess = hess))
+  }
+  param$objective = fake_softprob
+  bst <- xgb.train(param, dtrain, 1, num_class=nclasses)
+})
@@ -50,6 +50,12 @@ test_that("xgb.DMatrix: getinfo & setinfo", {
   labels <- getinfo(dtest, 'label')
   expect_equal(test_label, getinfo(dtest, 'label'))
 
+  expect_true(setinfo(dtest, 'label_lower_bound', test_label))
+  expect_equal(test_label, getinfo(dtest, 'label_lower_bound'))
+
+  expect_true(setinfo(dtest, 'label_upper_bound', test_label))
+  expect_equal(test_label, getinfo(dtest, 'label_upper_bound'))
+
   expect_true(length(getinfo(dtest, 'weight')) == 0)
   expect_true(length(getinfo(dtest, 'base_margin')) == 0)

@@ -59,7 +65,7 @@ test_that("xgb.DMatrix: getinfo & setinfo", {
   expect_error(setinfo(dtest, 'group', test_label))
 
   # providing character values will give a warning
-  expect_warning( setinfo(dtest, 'weight', rep('a', nrow(test_data))) )
+  expect_warning(setinfo(dtest, 'weight', rep('a', nrow(test_data))))
 
   # any other label should error
   expect_error(setinfo(dtest, 'asdf', test_label))
@@ -40,7 +40,7 @@ test_that("gblinear works", {
   expect_lt(bst$evaluation_log$eval_error[2], ERR_UL)
 
   bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'thrifty',
-                   top_n = 50, callbacks = list(cb.gblinear.history(sparse = TRUE)))
+                   top_k = 50, callbacks = list(cb.gblinear.history(sparse = TRUE)))
   expect_lt(bst$evaluation_log$eval_error[n], ERR_UL)
   h <- xgb.gblinear.history(bst)
   expect_equal(dim(h), c(n, ncol(dtrain) + 1))
@@ -7,8 +7,8 @@ require(vcd, quietly = TRUE)
 
 float_tolerance = 5e-6
 
-# disable some tests for Win32
-win32_flag = .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8
+# disable some tests for 32-bit environment
+flag_32bit = .Machine$sizeof.pointer != 8
 
 set.seed(1982)
 data(Arthritis)
@@ -44,7 +44,7 @@ mbst.GLM <- xgboost(data = as.matrix(iris[, -5]), label = mlabel, verbose = 0,
 
 
 test_that("xgb.dump works", {
-  if (!win32_flag)
+  if (!flag_32bit)
     expect_length(xgb.dump(bst.Tree), 200)
   dump_file = file.path(tempdir(), 'xgb.model.dump')
   expect_true(xgb.dump(bst.Tree, dump_file, with_stats = T))

@@ -54,7 +54,7 @@ test_that("xgb.dump works", {
   # JSON format
   dmp <- xgb.dump(bst.Tree, dump_format = "json")
   expect_length(dmp, 1)
-  if (!win32_flag)
+  if (!flag_32bit)
     expect_length(grep('nodeid', strsplit(dmp, '\n')[[1]]), 188)
 })
@@ -256,7 +256,7 @@ test_that("xgb.model.dt.tree works with and without feature names", {
   names.dt.trees <- c("Tree", "Node", "ID", "Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
   dt.tree <- xgb.model.dt.tree(feature_names = feature.names, model = bst.Tree)
   expect_equal(names.dt.trees, names(dt.tree))
-  if (!win32_flag)
+  if (!flag_32bit)
     expect_equal(dim(dt.tree), c(188, 10))
   expect_output(str(dt.tree), 'Feature.*\\"Age\\"')
@@ -283,7 +283,7 @@ test_that("xgb.model.dt.tree throws error for gblinear", {
 
 test_that("xgb.importance works with and without feature names", {
   importance.Tree <- xgb.importance(feature_names = feature.names, model = bst.Tree)
-  if (!win32_flag)
+  if (!flag_32bit)
     expect_equal(dim(importance.Tree), c(7, 4))
   expect_equal(colnames(importance.Tree), c("Feature", "Gain", "Cover", "Frequency"))
   expect_output(str(importance.Tree), 'Feature.*\\"Age\\"')
@@ -14,25 +14,42 @@ test_that("interaction constraints for regression", {
   bst <- xgboost(data = train, label = y, max_depth = 3,
                  eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
                  interaction_constraints = list(c(0,1)))
 
   # Set all observations to have the same x3 values then increment
   # by the same amount
   preds <- lapply(c(1,2,3), function(x){
     tmat <- matrix(c(x1,x2,rep(x,1000)), ncol=3)
     return(predict(bst, tmat))
   })
 
   # Check incrementing x3 has the same effect on all observations
   # since x3 is constrained to be independent of x1 and x2
   # and all observations start off from the same x3 value
   diff1 <- preds[[2]] - preds[[1]]
   test1 <- all(abs(diff1 - diff1[1]) < 1e-4)
 
   diff2 <- preds[[3]] - preds[[2]]
   test2 <- all(abs(diff2 - diff2[1]) < 1e-4)
 
   expect_true({
     test1 & test2
   }, "Interaction Contraint Satisfied")
 })
 
+test_that("interaction constraints scientific representation", {
+  rows <- 10
+  ## When number exceeds 1e5, R paste function uses scientific representation.
+  ## See: https://github.com/dmlc/xgboost/issues/5179
+  cols <- 1e5+10
+
+  d <- matrix(rexp(rows, rate=.1), nrow=rows, ncol=cols)
+  y <- rnorm(rows)
+
+  dtrain <- xgb.DMatrix(data=d, info = list(label=y))
+  inc <- list(c(seq.int(from = 0, to = cols, by = 1)))
+
+  with_inc <- xgb.train(data=dtrain, tree_method='hist',
+                        interaction_constraints=inc, nrounds=10)
+  without_inc <- xgb.train(data=dtrain, tree_method='hist', nrounds=10)
+  expect_equal(xgb.save.raw(with_inc), xgb.save.raw(without_inc))
+})
@@ -410,7 +410,7 @@ In some very specific cases, like when you want to pilot **XGBoost** from `caret
 
 ```{r saveLoadRBinVectorModel, message=F, warning=F}
 # save model to R's raw vector
-rawVec <- xgb.save.raw(bst)
+rawVec <- xgb.serialize(bst)
 
 # print class
 print(class(rawVec))
@@ -27,7 +27,7 @@ License
 Contribute to XGBoost
 ---------------------
 XGBoost has been developed and used by a group of active community members. Your help is very valuable to make the package better for everyone.
-Checkout the [Community Page](https://xgboost.ai/community)
+Checkout the [Community Page](https://xgboost.ai/community).
 
 Reference
 ---------
@@ -14,6 +14,7 @@
 #include "../src/metric/elementwise_metric.cc"
 #include "../src/metric/multiclass_metric.cc"
 #include "../src/metric/rank_metric.cc"
+#include "../src/metric/survival_metric.cc"
 
 // objectives
 #include "../src/objective/objective.cc"

@@ -21,6 +22,7 @@
 #include "../src/objective/multiclass_obj.cc"
 #include "../src/objective/rank_obj.cc"
 #include "../src/objective/hinge.cc"
+#include "../src/objective/aft_obj.cc"
 
 // gbms
 #include "../src/gbm/gbm.cc"

@@ -31,7 +33,6 @@
 
 // data
 #include "../src/data/data.cc"
-#include "../src/data/simple_csr_source.cc"
 #include "../src/data/simple_dmatrix.cc"
 #include "../src/data/sparse_page_raw_format.cc"
 #include "../src/data/ellpack_page.cc"

@@ -45,7 +46,7 @@
 #include "../src/data/sparse_page_dmatrix.cc"
 #endif
 
-// tress
+// trees
 #include "../src/tree/param.cc"
 #include "../src/tree/split_evaluator.cc"
 #include "../src/tree/tree_model.cc"

@@ -73,6 +74,8 @@
 #include "../src/common/hist_util.cc"
 #include "../src/common/json.cc"
 #include "../src/common/io.cc"
+#include "../src/common/survival_util.cc"
+#include "../src/common/probability_distribution.cc"
 #include "../src/common/version.cc"
 
 // c_api
@@ -65,6 +65,11 @@ function(set_output_directory target dir)
     LIBRARY_OUTPUT_DIRECTORY_RELEASE ${dir}
     LIBRARY_OUTPUT_DIRECTORY_RELWITHDEBINFO ${dir}
     LIBRARY_OUTPUT_DIRECTORY_MINSIZEREL ${dir}
+    ARCHIVE_OUTPUT_DIRECTORY ${dir}
+    ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${dir}
+    ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${dir}
+    ARCHIVE_OUTPUT_DIRECTORY_RELWITHDEBINFO ${dir}
+    ARCHIVE_OUTPUT_DIRECTORY_MINSIZEREL ${dir}
   )
 endfunction(set_output_directory)
 
@@ -50,10 +50,10 @@ function(create_rlib_for_msvc)
     \nDo you have Rtools installed with its MinGW's bin/ in PATH?")
   endif()
   # extract symbols from R.dll into R.def and R.lib import library
-  execute_process(COMMAND gendef
+  execute_process(COMMAND ${GENDEF_EXE}
     "-" "${LIBR_LIB_DIR}/R.dll"
     OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/R.def")
-  execute_process(COMMAND dlltool
+  execute_process(COMMAND ${DLLTOOL_EXE}
    "--input-def" "${CMAKE_CURRENT_BINARY_DIR}/R.def"
    "--output-lib" "${CMAKE_CURRENT_BINARY_DIR}/R.lib")
 endfunction(create_rlib_for_msvc)
@@ -103,12 +103,12 @@ else()
   )
   # ask R for the include dir
   execute_process(
-    COMMAND ${LIBR_EXECUTABLE} "--slave" "--no-save" "-e" "cat(R.home('include'))"
+    COMMAND ${LIBR_EXECUTABLE} "--slave" "--vanilla" "-e" "cat(R.home('include'))"
    OUTPUT_VARIABLE LIBR_INCLUDE_DIRS
   )
   # ask R for the lib dir
   execute_process(
-    COMMAND ${LIBR_EXECUTABLE} "--slave" "--no-save" "-e" "cat(R.home('lib'))"
+    COMMAND ${LIBR_EXECUTABLE} "--slave" "--vanilla" "-e" "cat(R.home('lib'))"
    OUTPUT_VARIABLE LIBR_LIB_DIR
   )
@@ -16,6 +16,7 @@ Contents
 - [Tutorials](#tutorials)
 - [Usecases](#usecases)
 - [Tools using XGBoost](#tools-using-xgboost)
+- [Integrations with 3rd party software](#integrations-with-3rd-party-software)
 - [Awards](#awards)
 - [Windows Binaries](#windows-binaries)
@@ -114,6 +115,7 @@ Please send pull requests if you find ones that are missing here.
 - [Complete Guide to Parameter Tuning in XGBoost](http://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/) by Aarshay Jain
 - [Practical XGBoost in Python online course](http://education.parrotprediction.teachable.com/courses/practical-xgboost-in-python) by Parrot Prediction
 - [Spark and XGBoost using Scala](http://www.elenacuoco.com/2016/10/10/scala-spark-xgboost-classification/) by Elena Cuoco
 
 ## Usecases
 If you have particular usecase of xgboost that you would like to highlight.
 Send a PR to add a one sentence description:)
@@ -126,14 +128,17 @@ Send a PR to add a one sentence description:)
|
||||
- [Hanjing Su](https://www.52cs.org) from Tencent data platform team: "We use distributed XGBoost for click through prediction in wechat shopping and lookalikes. The problems involve hundreds millions of users and thousands of features. XGBoost is cleanly designed and can be easily integrated into our production environment, reducing our cost in developments."
|
||||
- [CNevd](https://github.com/CNevd) from autohome.com ad platform team: "Distributed XGBoost is used for click through rate prediction in our display advertising, XGBoost is highly efficient and flexible and can be easily used on our distributed platform, our ctr made a great improvement with hundred millions samples and millions features due to this awesome XGBoost"
|
||||
|
||||
|
||||
|
||||
## Tools using XGBoost
|
||||
|
||||
- [BayesBoost](https://github.com/mpearmain/BayesBoost) - Bayesian Optimization using xgboost and sklearn API
|
||||
- [gp_xgboost_gridsearch](https://github.com/vatsan/gp_xgboost_gridsearch) - In-database parallel grid-search for XGBoost on [Greenplum](https://github.com/greenplum-db/gpdb) using PL/Python
|
||||
- [tpot](https://github.com/rhiever/tpot) - A Python tool that automatically creates and optimizes machine learning pipelines using genetic programming.
|
||||
|
||||
## Integrations with 3rd party software
|
||||
Open source integrations with XGBoost:
|
||||
* [Neptune.ai](http://neptune.ai/) - Experiment management and collaboration tool for ML/DL/RL specialists. Integration has a form of the [XGBoost callback](https://docs.neptune.ai/integrations/xgboost.html) that automatically logs training and evaluation metrics, as well as saved model (booster), feature importance chart and visualized trees.
|
||||
* [Optuna](https://optuna.org/) - An open source hyperparameter optimization framework to automate hyperparameter search. Optuna integrates with XGBoost in the [XGBoostPruningCallback](https://optuna.readthedocs.io/en/stable/reference/integration.html#optuna.integration.XGBoostPruningCallback) that let users easily prune unpromising trials.
|
||||
|
||||
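  A minimal sketch of how such a pruning callback plugs into training (illustrative only; assumes the `optuna` package is installed, and uses synthetic data):

```python
import numpy as np
import optuna
import xgboost as xgb

rng = np.random.RandomState(0)
X, y = rng.randn(200, 5), rng.randint(0, 2, 200)
dtrain = xgb.DMatrix(X[:150], label=y[:150])
dvalid = xgb.DMatrix(X[150:], label=y[150:])

def objective(trial):
    params = {'objective': 'binary:logistic',
              'eval_metric': 'logloss',
              'max_depth': trial.suggest_int('max_depth', 3, 8)}
    # Reports 'valid-logloss' to Optuna after each round so that
    # unpromising trials can be stopped early.
    pruning = optuna.integration.XGBoostPruningCallback(trial, 'valid-logloss')
    bst = xgb.train(params, dtrain, num_boost_round=100,
                    evals=[(dvalid, 'valid')],
                    early_stopping_rounds=10, callbacks=[pruning])
    return bst.best_score

study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=20)
```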
## Awards
- [John Chambers Award](http://stat-computing.org/awards/jmc/winners.html) - 2016 Winner: XGBoost R Package, by Tong He (Simon Fraser University) and Tianqi Chen (University of Washington)
- [InfoWorld’s 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html)
54
demo/aft_survival/aft_survival_demo.py
Normal file
@@ -0,0 +1,54 @@
"""
Demo for survival analysis (regression) using Accelerated Failure Time (AFT) model
"""
from sklearn.model_selection import ShuffleSplit
import pandas as pd
import numpy as np
import xgboost as xgb

# The Veterans' Administration Lung Cancer Trial
# The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980)
df = pd.read_csv('../data/veterans_lung_cancer.csv')
print('Training data:')
print(df)

# Split features and labels
y_lower_bound = df['Survival_label_lower_bound']
y_upper_bound = df['Survival_label_upper_bound']
X = df.drop(['Survival_label_lower_bound', 'Survival_label_upper_bound'], axis=1)

# Split data into training and validation sets
rs = ShuffleSplit(n_splits=2, test_size=.7, random_state=0)
train_index, valid_index = next(rs.split(X))
dtrain = xgb.DMatrix(X.values[train_index, :])
dtrain.set_float_info('label_lower_bound', y_lower_bound[train_index])
dtrain.set_float_info('label_upper_bound', y_upper_bound[train_index])
dvalid = xgb.DMatrix(X.values[valid_index, :])
dvalid.set_float_info('label_lower_bound', y_lower_bound[valid_index])
dvalid.set_float_info('label_upper_bound', y_upper_bound[valid_index])

# Train gradient boosted trees using AFT loss and metric
params = {'verbosity': 0,
          'objective': 'survival:aft',
          'eval_metric': 'aft-nloglik',
          'tree_method': 'hist',
          'learning_rate': 0.05,
          'aft_loss_distribution': 'normal',
          'aft_loss_distribution_scale': 1.20,
          'max_depth': 6,
          'lambda': 0.01,
          'alpha': 0.02}
bst = xgb.train(params, dtrain, num_boost_round=10000,
                evals=[(dtrain, 'train'), (dvalid, 'valid')],
                early_stopping_rounds=50)

# Run prediction on the validation set
df = pd.DataFrame({'Label (lower bound)': y_lower_bound[valid_index],
                   'Label (upper bound)': y_upper_bound[valid_index],
                   'Predicted label': bst.predict(dvalid)})
print(df)
# Show only data points with right-censored labels
print(df[np.isinf(df['Label (upper bound)'])])

# Save trained model
bst.save_model('aft_model.json')
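A possible follow-up to the demo above (not part of the file itself): the booster saved as `aft_model.json` can be reloaded later for prediction; a minimal sketch, assuming the file was written by the demo:

```python
import xgboost as xgb

bst = xgb.Booster(model_file='aft_model.json')  # booster saved by the demo above
# Predictions expect the same feature layout used during training, e.g.:
# preds = bst.predict(xgb.DMatrix(X_new))
```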
78
demo/aft_survival/aft_survival_demo_with_optuna.py
Normal file
@@ -0,0 +1,78 @@
"""
Demo for survival analysis (regression) using Accelerated Failure Time (AFT) model, using Optuna
to tune hyperparameters
"""
from sklearn.model_selection import ShuffleSplit
import pandas as pd
import numpy as np
import xgboost as xgb
import optuna

# The Veterans' Administration Lung Cancer Trial
# The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980)
df = pd.read_csv('../data/veterans_lung_cancer.csv')
print('Training data:')
print(df)

# Split features and labels
y_lower_bound = df['Survival_label_lower_bound']
y_upper_bound = df['Survival_label_upper_bound']
X = df.drop(['Survival_label_lower_bound', 'Survival_label_upper_bound'], axis=1)

# Split data into training and validation sets
rs = ShuffleSplit(n_splits=2, test_size=.7, random_state=0)
train_index, valid_index = next(rs.split(X))
dtrain = xgb.DMatrix(X.values[train_index, :])
dtrain.set_float_info('label_lower_bound', y_lower_bound[train_index])
dtrain.set_float_info('label_upper_bound', y_upper_bound[train_index])
dvalid = xgb.DMatrix(X.values[valid_index, :])
dvalid.set_float_info('label_lower_bound', y_lower_bound[valid_index])
dvalid.set_float_info('label_upper_bound', y_upper_bound[valid_index])

# Define hyperparameter search space
base_params = {'verbosity': 0,
               'objective': 'survival:aft',
               'eval_metric': 'aft-nloglik',
               'tree_method': 'hist'}  # Hyperparameters common to all trials

def objective(trial):
    params = {'learning_rate': trial.suggest_loguniform('learning_rate', 0.01, 1.0),
              'aft_loss_distribution': trial.suggest_categorical('aft_loss_distribution',
                                                                 ['normal', 'logistic', 'extreme']),
              'aft_loss_distribution_scale': trial.suggest_loguniform('aft_loss_distribution_scale', 0.1, 10.0),
              'max_depth': trial.suggest_int('max_depth', 3, 8),
              'lambda': trial.suggest_loguniform('lambda', 1e-8, 1.0),
              'alpha': trial.suggest_loguniform('alpha', 1e-8, 1.0)}  # Search space
    params.update(base_params)
    pruning_callback = optuna.integration.XGBoostPruningCallback(trial, 'valid-aft-nloglik')
    bst = xgb.train(params, dtrain, num_boost_round=10000,
                    evals=[(dtrain, 'train'), (dvalid, 'valid')],
                    early_stopping_rounds=50, verbose_eval=False, callbacks=[pruning_callback])
    if bst.best_iteration >= 25:
        return bst.best_score
    else:
        return np.inf  # Reject models with < 25 trees

# Run hyperparameter search
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=200)
print('Completed hyperparameter tuning with best aft-nloglik = {}.'.format(study.best_trial.value))
params = {}
params.update(base_params)
params.update(study.best_trial.params)

# Re-run training with the best hyperparameter combination
print('Re-running the best trial... params = {}'.format(params))
bst = xgb.train(params, dtrain, num_boost_round=10000,
                evals=[(dtrain, 'train'), (dvalid, 'valid')],
                early_stopping_rounds=50)

# Run prediction on the validation set
df = pd.DataFrame({'Label (lower bound)': y_lower_bound[valid_index],
                   'Label (upper bound)': y_upper_bound[valid_index],
                   'Predicted label': bst.predict(dvalid)})
print(df)
# Show only data points with right-censored labels
print(df[np.isinf(df['Label (upper bound)'])])

# Save trained model
bst.save_model('aft_best_model.json')
97
demo/aft_survival/aft_survival_viz_demo.py
Normal file
@@ -0,0 +1,97 @@
"""
Visual demo for survival analysis (regression) with Accelerated Failure Time (AFT) model.

This demo uses 1D toy data and visualizes how XGBoost fits a tree ensemble. The ensemble model
starts out as a flat line and evolves into a step function in order to account for all ranged
labels.
"""
import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt

plt.rcParams.update({'font.size': 13})

# Function to visualize censored labels
def plot_censored_labels(X, y_lower, y_upper):
    def replace_inf(x, target_value):
        x[np.isinf(x)] = target_value
        return x
    plt.plot(X, y_lower, 'o', label='y_lower', color='blue')
    plt.plot(X, y_upper, 'o', label='y_upper', color='fuchsia')
    plt.vlines(X, ymin=replace_inf(y_lower, 0.01), ymax=replace_inf(y_upper, 1000),
               label='Range for y', color='gray')

# Toy data
X = np.array([1, 2, 3, 4, 5]).reshape((-1, 1))
INF = np.inf
y_lower = np.array([ 10,  15, -INF, 30, 100])
y_upper = np.array([INF, INF,   20, 50, INF])

# Visualize toy data
plt.figure(figsize=(5, 4))
plot_censored_labels(X, y_lower, y_upper)
plt.ylim((6, 200))
plt.legend(loc='lower right')
plt.title('Toy data')
plt.xlabel('Input feature')
plt.ylabel('Label')
plt.yscale('log')
plt.tight_layout()
plt.show(block=True)

# Will be used to visualize XGBoost model
grid_pts = np.linspace(0.8, 5.2, 1000).reshape((-1, 1))

# Train AFT model using XGBoost
dmat = xgb.DMatrix(X)
dmat.set_float_info('label_lower_bound', y_lower)
dmat.set_float_info('label_upper_bound', y_upper)
params = {'max_depth': 3, 'objective': 'survival:aft', 'min_child_weight': 0}

accuracy_history = []
def plot_intermediate_model_callback(env):
    """Custom callback to plot intermediate models"""
    # Compute y_pred = prediction using the intermediate model, at current boosting iteration
    y_pred = env.model.predict(dmat)
    # "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes
    # the corresponding predicted label (y_pred)
    acc = np.sum(np.logical_and(y_pred >= y_lower, y_pred <= y_upper)/len(X) * 100)
    accuracy_history.append(acc)

    # Plot ranged labels as well as predictions by the model
    plt.subplot(5, 3, env.iteration + 1)
    plot_censored_labels(X, y_lower, y_upper)
    y_pred_grid_pts = env.model.predict(xgb.DMatrix(grid_pts))
    plt.plot(grid_pts, y_pred_grid_pts, 'r-', label='XGBoost AFT model', linewidth=4)
    plt.title('Iteration {}'.format(env.iteration), x=0.5, y=0.8)
    plt.xlim((0.8, 5.2))
    plt.ylim((1 if np.min(y_pred) < 6 else 6, 200))
    plt.yscale('log')

res = {}
plt.figure(figsize=(12, 13))
bst = xgb.train(params, dmat, 15, [(dmat, 'train')], evals_result=res,
                callbacks=[plot_intermediate_model_callback])
plt.tight_layout()
plt.legend(loc='lower center', ncol=4,
           bbox_to_anchor=(0.5, 0),
           bbox_transform=plt.gcf().transFigure)
plt.tight_layout()

# Plot negative log likelihood over boosting iterations
plt.figure(figsize=(8, 3))
plt.subplot(1, 2, 1)
plt.plot(res['train']['aft-nloglik'], 'b-o', label='aft-nloglik')
plt.xlabel('# Boosting Iterations')
plt.legend(loc='best')

# Plot "accuracy" over boosting iterations
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes
# the corresponding predicted label (y_pred)
plt.subplot(1, 2, 2)
plt.plot(accuracy_history, 'r-o', label='Accuracy (%)')
plt.xlabel('# Boosting Iterations')
plt.legend(loc='best')
plt.tight_layout()

plt.show()
@@ -156,7 +156,7 @@ If you want to continue boosting from existing model, say 0002.model, use
```
xgboost will load from 0002.model, continue boosting for 2 rounds, and save the output to continue.model. However, beware that the training and evaluation data specified in mushroom.conf should not change when you use this function.
#### Use Multi-Threading
When you are working with a large dataset, you may want to take advantage of parallelism. If your compiler supports OpenMP, xgboost is naturally multi-threaded; to set the number of parallel threads, add the ```nthread``` parameter to you configuration.
When you are working with a large dataset, you may want to take advantage of parallelism. If your compiler supports OpenMP, xgboost is naturally multi-threaded; to set the number of parallel threads, add the ```nthread``` parameter to your configuration.
Eg. ```nthread=10```

Set nthread to be the number of your real cpu (On Unix, this can be found using ```lscpu```)
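The Python package exposes the same knob as a training parameter. A small illustrative sketch (not part of the CLI walkthrough above; uses synthetic data):

```python
import os
import numpy as np
import xgboost as xgb

# Match nthread to the number of cores reported by the OS.
params = {'objective': 'binary:logistic', 'nthread': os.cpu_count()}
X, y = np.random.randn(100, 4), np.random.randint(0, 2, 100)
bst = xgb.train(params, xgb.DMatrix(X, label=y), num_boost_round=10)
```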
@@ -22,7 +22,6 @@ def main(client):
    # evaluation metrics.
    output = xgb.dask.train(client,
                            {'verbosity': 1,
                             'nthread': 1,
                             'tree_method': 'hist'},
                            dtrain,
                            num_boost_round=4, evals=[(dtrain, 'train')])
@@ -37,6 +36,6 @@ def main(client):

if __name__ == '__main__':
    # or use other clusters for scaling
    with LocalCluster(n_workers=7, threads_per_worker=1) as cluster:
    with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
        with Client(cluster) as client:
            main(client)
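For context, a self-contained version of such a Dask CPU training script might look like the sketch below. It is an assumption-laden illustration, not the demo file itself: the data here is random, and `dtrain` is built with `xgb.dask.DaskDMatrix`, the distributed counterpart of `DMatrix`:

```python
from dask.distributed import Client, LocalCluster
from dask import array as da
import xgboost as xgb

def main(client):
    # Generate a random demo dataset as distributed dask arrays.
    n, m = 100000, 20
    X = da.random.random((n, m), chunks=10000)
    y = da.random.random(n, chunks=10000)
    dtrain = xgb.dask.DaskDMatrix(client, X, y)

    output = xgb.dask.train(client,
                            {'verbosity': 1, 'tree_method': 'hist'},
                            dtrain,
                            num_boost_round=4, evals=[(dtrain, 'train')])
    booster = output['booster']   # the trained booster
    history = output['history']   # per-round evaluation metrics
    print('Evaluation history:', history)
    return xgb.dask.predict(client, booster, dtrain)

if __name__ == '__main__':
    with LocalCluster(n_workers=2, threads_per_worker=4) as cluster:
        with Client(cluster) as client:
            main(client)
```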
@@ -22,7 +22,6 @@ def main(client):
    # evaluation metrics.
    output = xgb.dask.train(client,
                            {'verbosity': 2,
                             'nthread': 1,
                             # Golden line for GPU training
                             'tree_method': 'gpu_hist'},
                            dtrain,
@@ -41,6 +40,6 @@ if __name__ == '__main__':
    # `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
    # `n_workers` represents the number of GPUs since we use one GPU per worker
    # process.
    with LocalCUDACluster(n_workers=2, threads_per_worker=1) as cluster:
    with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
        with Client(cluster) as client:
            main(client)
138
demo/data/veterans_lung_cancer.csv
Normal file
@@ -0,0 +1,138 @@
Survival_label_lower_bound,Survival_label_upper_bound,Age_in_years,Karnofsky_score,Months_from_Diagnosis,Celltype=adeno,Celltype=large,Celltype=smallcell,Celltype=squamous,Prior_therapy=no,Prior_therapy=yes,Treatment=standard,Treatment=test
72.0,72.0,69.0,60.0,7.0,0,0,0,1,1,0,1,0
411.0,411.0,64.0,70.0,5.0,0,0,0,1,0,1,1,0
228.0,228.0,38.0,60.0,3.0,0,0,0,1,1,0,1,0
126.0,126.0,63.0,60.0,9.0,0,0,0,1,0,1,1,0
118.0,118.0,65.0,70.0,11.0,0,0,0,1,0,1,1,0
10.0,10.0,49.0,20.0,5.0,0,0,0,1,1,0,1,0
82.0,82.0,69.0,40.0,10.0,0,0,0,1,0,1,1,0
110.0,110.0,68.0,80.0,29.0,0,0,0,1,1,0,1,0
314.0,314.0,43.0,50.0,18.0,0,0,0,1,1,0,1,0
100.0,inf,70.0,70.0,6.0,0,0,0,1,1,0,1,0
42.0,42.0,81.0,60.0,4.0,0,0,0,1,1,0,1,0
8.0,8.0,63.0,40.0,58.0,0,0,0,1,0,1,1,0
144.0,144.0,63.0,30.0,4.0,0,0,0,1,1,0,1,0
25.0,inf,52.0,80.0,9.0,0,0,0,1,0,1,1,0
11.0,11.0,48.0,70.0,11.0,0,0,0,1,0,1,1,0
30.0,30.0,61.0,60.0,3.0,0,0,1,0,1,0,1,0
384.0,384.0,42.0,60.0,9.0,0,0,1,0,1,0,1,0
4.0,4.0,35.0,40.0,2.0,0,0,1,0,1,0,1,0
54.0,54.0,63.0,80.0,4.0,0,0,1,0,0,1,1,0
13.0,13.0,56.0,60.0,4.0,0,0,1,0,1,0,1,0
123.0,inf,55.0,40.0,3.0,0,0,1,0,1,0,1,0
97.0,inf,67.0,60.0,5.0,0,0,1,0,1,0,1,0
153.0,153.0,63.0,60.0,14.0,0,0,1,0,0,1,1,0
59.0,59.0,65.0,30.0,2.0,0,0,1,0,1,0,1,0
117.0,117.0,46.0,80.0,3.0,0,0,1,0,1,0,1,0
16.0,16.0,53.0,30.0,4.0,0,0,1,0,0,1,1,0
151.0,151.0,69.0,50.0,12.0,0,0,1,0,1,0,1,0
22.0,22.0,68.0,60.0,4.0,0,0,1,0,1,0,1,0
56.0,56.0,43.0,80.0,12.0,0,0,1,0,0,1,1,0
21.0,21.0,55.0,40.0,2.0,0,0,1,0,0,1,1,0
18.0,18.0,42.0,20.0,15.0,0,0,1,0,1,0,1,0
139.0,139.0,64.0,80.0,2.0,0,0,1,0,1,0,1,0
20.0,20.0,65.0,30.0,5.0,0,0,1,0,1,0,1,0
31.0,31.0,65.0,75.0,3.0,0,0,1,0,1,0,1,0
52.0,52.0,55.0,70.0,2.0,0,0,1,0,1,0,1,0
287.0,287.0,66.0,60.0,25.0,0,0,1,0,0,1,1,0
18.0,18.0,60.0,30.0,4.0,0,0,1,0,1,0,1,0
51.0,51.0,67.0,60.0,1.0,0,0,1,0,1,0,1,0
122.0,122.0,53.0,80.0,28.0,0,0,1,0,1,0,1,0
27.0,27.0,62.0,60.0,8.0,0,0,1,0,1,0,1,0
54.0,54.0,67.0,70.0,1.0,0,0,1,0,1,0,1,0
7.0,7.0,72.0,50.0,7.0,0,0,1,0,1,0,1,0
63.0,63.0,48.0,50.0,11.0,0,0,1,0,1,0,1,0
392.0,392.0,68.0,40.0,4.0,0,0,1,0,1,0,1,0
10.0,10.0,67.0,40.0,23.0,0,0,1,0,0,1,1,0
8.0,8.0,61.0,20.0,19.0,1,0,0,0,0,1,1,0
92.0,92.0,60.0,70.0,10.0,1,0,0,0,1,0,1,0
35.0,35.0,62.0,40.0,6.0,1,0,0,0,1,0,1,0
117.0,117.0,38.0,80.0,2.0,1,0,0,0,1,0,1,0
132.0,132.0,50.0,80.0,5.0,1,0,0,0,1,0,1,0
12.0,12.0,63.0,50.0,4.0,1,0,0,0,0,1,1,0
162.0,162.0,64.0,80.0,5.0,1,0,0,0,1,0,1,0
3.0,3.0,43.0,30.0,3.0,1,0,0,0,1,0,1,0
95.0,95.0,34.0,80.0,4.0,1,0,0,0,1,0,1,0
177.0,177.0,66.0,50.0,16.0,0,1,0,0,0,1,1,0
162.0,162.0,62.0,80.0,5.0,0,1,0,0,1,0,1,0
216.0,216.0,52.0,50.0,15.0,0,1,0,0,1,0,1,0
553.0,553.0,47.0,70.0,2.0,0,1,0,0,1,0,1,0
278.0,278.0,63.0,60.0,12.0,0,1,0,0,1,0,1,0
12.0,12.0,68.0,40.0,12.0,0,1,0,0,0,1,1,0
260.0,260.0,45.0,80.0,5.0,0,1,0,0,1,0,1,0
200.0,200.0,41.0,80.0,12.0,0,1,0,0,0,1,1,0
156.0,156.0,66.0,70.0,2.0,0,1,0,0,1,0,1,0
182.0,inf,62.0,90.0,2.0,0,1,0,0,1,0,1,0
143.0,143.0,60.0,90.0,8.0,0,1,0,0,1,0,1,0
105.0,105.0,66.0,80.0,11.0,0,1,0,0,1,0,1,0
103.0,103.0,38.0,80.0,5.0,0,1,0,0,1,0,1,0
250.0,250.0,53.0,70.0,8.0,0,1,0,0,0,1,1,0
100.0,100.0,37.0,60.0,13.0,0,1,0,0,0,1,1,0
999.0,999.0,54.0,90.0,12.0,0,0,0,1,0,1,0,1
112.0,112.0,60.0,80.0,6.0,0,0,0,1,1,0,0,1
87.0,inf,48.0,80.0,3.0,0,0,0,1,1,0,0,1
231.0,inf,52.0,50.0,8.0,0,0,0,1,0,1,0,1
242.0,242.0,70.0,50.0,1.0,0,0,0,1,1,0,0,1
991.0,991.0,50.0,70.0,7.0,0,0,0,1,0,1,0,1
111.0,111.0,62.0,70.0,3.0,0,0,0,1,1,0,0,1
1.0,1.0,65.0,20.0,21.0,0,0,0,1,0,1,0,1
587.0,587.0,58.0,60.0,3.0,0,0,0,1,1,0,0,1
389.0,389.0,62.0,90.0,2.0,0,0,0,1,1,0,0,1
33.0,33.0,64.0,30.0,6.0,0,0,0,1,1,0,0,1
25.0,25.0,63.0,20.0,36.0,0,0,0,1,1,0,0,1
357.0,357.0,58.0,70.0,13.0,0,0,0,1,1,0,0,1
467.0,467.0,64.0,90.0,2.0,0,0,0,1,1,0,0,1
201.0,201.0,52.0,80.0,28.0,0,0,0,1,0,1,0,1
1.0,1.0,35.0,50.0,7.0,0,0,0,1,1,0,0,1
30.0,30.0,63.0,70.0,11.0,0,0,0,1,1,0,0,1
44.0,44.0,70.0,60.0,13.0,0,0,0,1,0,1,0,1
283.0,283.0,51.0,90.0,2.0,0,0,0,1,1,0,0,1
15.0,15.0,40.0,50.0,13.0,0,0,0,1,0,1,0,1
25.0,25.0,69.0,30.0,2.0,0,0,1,0,1,0,0,1
103.0,inf,36.0,70.0,22.0,0,0,1,0,0,1,0,1
21.0,21.0,71.0,20.0,4.0,0,0,1,0,1,0,0,1
13.0,13.0,62.0,30.0,2.0,0,0,1,0,1,0,0,1
87.0,87.0,60.0,60.0,2.0,0,0,1,0,1,0,0,1
2.0,2.0,44.0,40.0,36.0,0,0,1,0,0,1,0,1
20.0,20.0,54.0,30.0,9.0,0,0,1,0,0,1,0,1
7.0,7.0,66.0,20.0,11.0,0,0,1,0,1,0,0,1
24.0,24.0,49.0,60.0,8.0,0,0,1,0,1,0,0,1
99.0,99.0,72.0,70.0,3.0,0,0,1,0,1,0,0,1
8.0,8.0,68.0,80.0,2.0,0,0,1,0,1,0,0,1
99.0,99.0,62.0,85.0,4.0,0,0,1,0,1,0,0,1
61.0,61.0,71.0,70.0,2.0,0,0,1,0,1,0,0,1
25.0,25.0,70.0,70.0,2.0,0,0,1,0,1,0,0,1
95.0,95.0,61.0,70.0,1.0,0,0,1,0,1,0,0,1
80.0,80.0,71.0,50.0,17.0,0,0,1,0,1,0,0,1
51.0,51.0,59.0,30.0,87.0,0,0,1,0,0,1,0,1
29.0,29.0,67.0,40.0,8.0,0,0,1,0,1,0,0,1
24.0,24.0,60.0,40.0,2.0,1,0,0,0,1,0,0,1
18.0,18.0,69.0,40.0,5.0,1,0,0,0,0,1,0,1
83.0,inf,57.0,99.0,3.0,1,0,0,0,1,0,0,1
31.0,31.0,39.0,80.0,3.0,1,0,0,0,1,0,0,1
51.0,51.0,62.0,60.0,5.0,1,0,0,0,1,0,0,1
90.0,90.0,50.0,60.0,22.0,1,0,0,0,0,1,0,1
52.0,52.0,43.0,60.0,3.0,1,0,0,0,1,0,0,1
73.0,73.0,70.0,60.0,3.0,1,0,0,0,1,0,0,1
8.0,8.0,66.0,50.0,5.0,1,0,0,0,1,0,0,1
36.0,36.0,61.0,70.0,8.0,1,0,0,0,1,0,0,1
48.0,48.0,81.0,10.0,4.0,1,0,0,0,1,0,0,1
7.0,7.0,58.0,40.0,4.0,1,0,0,0,1,0,0,1
140.0,140.0,63.0,70.0,3.0,1,0,0,0,1,0,0,1
186.0,186.0,60.0,90.0,3.0,1,0,0,0,1,0,0,1
84.0,84.0,62.0,80.0,4.0,1,0,0,0,0,1,0,1
19.0,19.0,42.0,50.0,10.0,1,0,0,0,1,0,0,1
45.0,45.0,69.0,40.0,3.0,1,0,0,0,1,0,0,1
80.0,80.0,63.0,40.0,4.0,1,0,0,0,1,0,0,1
52.0,52.0,45.0,60.0,4.0,0,1,0,0,1,0,0,1
164.0,164.0,68.0,70.0,15.0,0,1,0,0,0,1,0,1
19.0,19.0,39.0,30.0,4.0,0,1,0,0,0,1,0,1
53.0,53.0,66.0,60.0,12.0,0,1,0,0,1,0,0,1
15.0,15.0,63.0,30.0,5.0,0,1,0,0,1,0,0,1
43.0,43.0,49.0,60.0,11.0,0,1,0,0,0,1,0,1
340.0,340.0,64.0,80.0,10.0,0,1,0,0,0,1,0,1
133.0,133.0,65.0,75.0,1.0,0,1,0,0,1,0,0,1
111.0,111.0,64.0,60.0,5.0,0,1,0,0,1,0,0,1
231.0,231.0,67.0,70.0,18.0,0,1,0,0,0,1,0,1
378.0,378.0,65.0,80.0,4.0,0,1,0,0,1,0,0,1
49.0,49.0,37.0,30.0,3.0,0,1,0,0,1,0,0,1
@@ -8,7 +8,11 @@ if you are interested in contributing.
Build XGBoost with Distributed Filesystem Support
-------------------------------------------------
To use distributed xgboost, you only need to turn the options on to build
with distributed filesystems(HDFS or S3) in ```xgboost/make/config.mk```.
with distributed filesystems(HDFS or S3) in cmake.

```
cmake <path/to/xgboost> -DUSE_HDFS=ON -DUSE_S3=ON -DUSE_AZURE=ON
```

Step by Step Tutorial on AWS
@@ -3,6 +3,7 @@ XGBoost Python Feature Walkthrough
* [Basic walkthrough of wrappers](basic_walkthrough.py)
* [Customize loss function, and evaluation metric](custom_objective.py)
* [Re-implement RMSLE as customized metric and objective](custom_rmsle.py)
* [Re-Implement `multi:softmax` objective as customized objective](custom_softmax.py)
* [Boosting from existing prediction](boost_from_prediction.py)
* [Predicting using first n trees](predict_first_ntree.py)
* [Generalized Linear Model](generalized_linear_model.py)
@@ -1,16 +1,22 @@
#!/usr/bin/python
#!/usr/bin/env python
import numpy as np
import scipy.sparse
import pickle
import xgboost as xgb
import os

### simple example
# Make sure the demo knows where to load the data.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
XGBOOST_ROOT_DIR = os.path.dirname(os.path.dirname(CURRENT_DIR))
DEMO_DIR = os.path.join(XGBOOST_ROOT_DIR, 'demo')

# simple example
# load file from text file, also binary buffer generated by xgboost
dtrain = xgb.DMatrix('../data/agaricus.txt.train')
dtest = xgb.DMatrix('../data/agaricus.txt.test')
dtrain = xgb.DMatrix(os.path.join(DEMO_DIR, 'data', 'agaricus.txt.train'))
dtest = xgb.DMatrix(os.path.join(DEMO_DIR, 'data', 'agaricus.txt.test'))

# specify parameters via map, definition are same as c++ version
param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic'}
param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}

# specify validations set to watch performance
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
@@ -20,12 +26,14 @@ bst = xgb.train(param, dtrain, num_round, watchlist)
# this is prediction
preds = bst.predict(dtest)
labels = dtest.get_label()
print('error=%f' % (sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds))))
print('error=%f' %
      (sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) /
       float(len(preds))))
bst.save_model('0001.model')
# dump model
bst.dump_model('dump.raw.txt')
# dump model with feature map
bst.dump_model('dump.nice.txt', '../data/featmap.txt')
bst.dump_model('dump.nice.txt', os.path.join(DEMO_DIR, 'data/featmap.txt'))

# save dmatrix into binary buffer
dtest.save_binary('dtest.buffer')
@@ -50,14 +58,18 @@ assert np.sum(np.abs(preds3 - preds)) == 0
# build dmatrix from scipy.sparse
print('start running example of build DMatrix from scipy.sparse CSR Matrix')
labels = []
row = []; col = []; dat = []
row = []
col = []
dat = []
i = 0
for l in open('../data/agaricus.txt.train'):
for l in open(os.path.join(DEMO_DIR, 'data', 'agaricus.txt.train')):
    arr = l.split()
    labels.append(int(arr[0]))
    for it in arr[1:]:
        k,v = it.split(':')
        row.append(i); col.append(int(k)); dat.append(float(v))
        k, v = it.split(':')
        row.append(i)
        col.append(int(k))
        dat.append(float(v))
    i += 1
csr = scipy.sparse.csr_matrix((dat, (row, col)))
dtrain = xgb.DMatrix(csr, label=labels)
@@ -72,8 +84,8 @@ watchlist = [(dtest, 'eval'), (dtrain, 'train')]
bst = xgb.train(param, dtrain, num_round, watchlist)

print('start running example of build DMatrix from numpy array')
# NOTE: npymat is numpy array, we will convert it into scipy.sparse.csr_matrix in internal implementation
# then convert to DMatrix
# NOTE: npymat is numpy array, we will convert it into scipy.sparse.csr_matrix
# in internal implementation then convert to DMatrix
npymat = csr.todense()
dtrain = xgb.DMatrix(npymat, label=labels)
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
@@ -45,7 +45,7 @@ xgb.cv(param, dtrain, num_round, nfold=5,
# you can also do cross validation with customized loss function
# See custom_objective.py
##
print('running cross validation, with cutomsized loss function')
print('running cross validation, with customized loss function')
def logregobj(preds, dtrain):
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))
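The hunk above cuts off mid-function. For reference, the standard logistic-loss custom objective in this demo continues by returning the gradient and hessian, roughly as in the sketch below (consistent with `custom_objective.py`):

```python
import numpy as np

def logregobj(preds, dtrain):
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))  # margin -> probability
    grad = preds - labels                 # first-order gradient of log loss
    hess = preds * (1.0 - preds)          # second-order gradient (hessian)
    return grad, hess
```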
@@ -15,6 +15,7 @@ import numpy as np
import xgboost as xgb
from typing import Tuple, Dict, List
from time import time
import argparse
import matplotlib
from matplotlib import pyplot as plt

@@ -150,12 +151,7 @@ def py_rmsle(dtrain: xgb.DMatrix, dtest: xgb.DMatrix) -> Dict:
    return results


if __name__ == '__main__':
    dtrain, dtest = generate_data()
    rmse_evals = native_rmse(dtrain, dtest)
    rmsle_evals = native_rmsle(dtrain, dtest)
    py_rmsle_evals = py_rmsle(dtrain, dtest)

def plot_history(rmse_evals, rmsle_evals, py_rmsle_evals):
    fig, axs = plt.subplots(3, 1)
    ax0: matplotlib.axes.Axes = axs[0]
    ax1: matplotlib.axes.Axes = axs[1]
@@ -177,3 +173,25 @@ if __name__ == '__main__':

    plt.show()
    plt.close()


def main(args):
    dtrain, dtest = generate_data()
    rmse_evals = native_rmse(dtrain, dtest)
    rmsle_evals = native_rmsle(dtrain, dtest)
    py_rmsle_evals = py_rmsle(dtrain, dtest)

    if args.plot != 0:
        plot_history(rmse_evals, rmsle_evals, py_rmsle_evals)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Arguments for custom RMSLE objective function demo.')
    parser.add_argument(
        '--plot',
        type=int,
        default=1,
        help='Set to 0 to disable plotting the evaluation history.')
    args = parser.parse_args()
    main(args)
148
demo/guide-python/custom_softmax.py
Normal file
@@ -0,0 +1,148 @@
'''Demo for creating customized multi-class objective function. This demo is
only applicable after (excluding) XGBoost 1.0.0, as before this version XGBoost
returns transformed prediction for multi-class objective function. More
details in comments.

'''

import numpy as np
import xgboost as xgb
from matplotlib import pyplot as plt
import argparse

np.random.seed(1994)

kRows = 100
kCols = 10
kClasses = 4            # number of classes

kRounds = 10            # number of boosting rounds.

# Generate some random data for demo.
X = np.random.randn(kRows, kCols)
y = np.random.randint(0, 4, size=kRows)

m = xgb.DMatrix(X, y)


def softmax(x):
    '''Softmax function with x as input vector.'''
    e = np.exp(x)
    return e / np.sum(e)


def softprob_obj(predt: np.ndarray, data: xgb.DMatrix):
    '''Loss function. Computing the gradient and approximated hessian (diagonal).
    Reimplements the `multi:softprob` inside XGBoost.

    '''
    labels = data.get_label()
    if data.get_weight().size == 0:
        # Use 1 as weight if we don't have custom weight.
        weights = np.ones((kRows, 1), dtype=float)
    else:
        weights = data.get_weight()

    # The prediction is of shape (rows, classes), each element in a row
    # represents a raw prediction (leaf weight, hasn't gone through softmax
    # yet). In XGBoost 1.0.0, the prediction is transformed by a softmax
    # function, fixed in later versions.
    assert predt.shape == (kRows, kClasses)

    grad = np.zeros((kRows, kClasses), dtype=float)
    hess = np.zeros((kRows, kClasses), dtype=float)

    eps = 1e-6

    # compute the gradient and hessian, slow iterations in Python, only
    # suitable for demo. Also the one in native XGBoost core is more robust to
    # numeric overflow as we don't do anything to mitigate the `exp` in
    # `softmax` here.
    for r in range(predt.shape[0]):
        target = labels[r]
        p = softmax(predt[r, :])
        for c in range(predt.shape[1]):
            assert target >= 0 and target <= kClasses
            g = p[c] - 1.0 if c == target else p[c]
            g = g * weights[r]
            h = max((2.0 * p[c] * (1.0 - p[c]) * weights[r]).item(), eps)
            grad[r, c] = g
            hess[r, c] = h

    # Right now (XGBoost 1.0.0), reshaping is necessary
    grad = grad.reshape((kRows * kClasses, 1))
    hess = hess.reshape((kRows * kClasses, 1))
    return grad, hess


def predict(booster, X):
    '''A customized prediction function that converts raw prediction to
    target class.

    '''
    # Output margin means we want to obtain the raw prediction obtained from
    # tree leaf weight.
    predt = booster.predict(X, output_margin=True)
    out = np.zeros(kRows)
    for r in range(predt.shape[0]):
        # the class with maximum prob (not strictly prob as it hasn't gone
        # through softmax yet so it doesn't sum to 1, but result is the same
        # for argmax).
        i = np.argmax(predt[r])
        out[r] = i
    return out


def plot_history(custom_results, native_results):
    fig, axs = plt.subplots(2, 1)
    ax0 = axs[0]
    ax1 = axs[1]

    x = np.arange(0, kRounds, 1)
    ax0.plot(x, custom_results['train']['merror'], label='Custom objective')
    ax0.legend()
    ax1.plot(x, native_results['train']['merror'], label='multi:softmax')
    ax1.legend()

    plt.show()


def main(args):
    custom_results = {}
    # Use our custom objective function
    booster_custom = xgb.train({'num_class': kClasses},
                               m,
                               num_boost_round=kRounds,
                               obj=softprob_obj,
                               evals_result=custom_results,
                               evals=[(m, 'train')])

    predt_custom = predict(booster_custom, m)

    native_results = {}
    # Use the same objective function defined in XGBoost.
    booster_native = xgb.train({'num_class': kClasses},
                               m,
                               num_boost_round=kRounds,
                               evals_result=native_results,
                               evals=[(m, 'train')])
    predt_native = booster_native.predict(m)

    # We are reimplementing the loss function in XGBoost, so it should
    # be the same for normal cases.
    assert np.all(predt_custom == predt_native)

    if args.plot != 0:
        plot_history(custom_results, native_results)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Arguments for custom softmax objective function demo.')
    parser.add_argument(
        '--plot',
        type=int,
        default=1,
        help='Set to 0 to disable plotting the evaluation history.')
    args = parser.parse_args()
    main(args)
@@ -19,7 +19,7 @@ contributors = set()
reviewers = set()

for line in git.log(f'{from_commit}..{to_commit}', '--pretty=format:%s', '--reverse'):
    m = re.search('\(#([0-9]+)\)', line.rstrip())
    m = re.search('\(#([0-9]+)\)$', line.rstrip())
    if m:
        pr_id = m.group(1)
        print(f'PR #{pr_id}')
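The only change here is anchoring the pattern with `$`, so a PR reference is picked up only when it closes the commit subject. A small illustration of the difference, using hypothetical subjects:

```python
import re

subjects = ['Fix CSV parsing (#123) and add tests',  # reference mid-line
            'Add AFT survival demo (#456)']          # reference at end
for s in subjects:
    loose = re.search(r'\(#([0-9]+)\)', s)
    strict = re.search(r'\(#([0-9]+)\)$', s)
    print(s, '->', bool(loose), bool(strict))
# Only the second subject matches the anchored pattern.
```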
Submodule dmlc-core updated: 552f7de748...5df8305fe6
203
doc/build.rst
@@ -15,16 +15,20 @@ Installation Guide

* The binary wheel will support GPU algorithms (`gpu_hist`) on machines with NVIDIA GPUs. Please note that **training with multiple GPUs is only supported for Linux platform**. See :doc:`gpu/index`.
* Currently, we provide binary wheels for 64-bit Linux and Windows.
* Nightly builds are available. You can now run *pip install https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/xgboost-[version]+[commit hash]-py2.py3-none-manylinux1_x86_64.whl* to install the nightly build with the given commit hash. See `this page <https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/list.html>`_ to see the list of all nightly builds.
* Nightly builds are available. You can now run

  .. code-block:: bash

    pip install https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/xgboost-[version]+[commithash]-py2.py3-none-manylinux1_x86_64.whl

  to install the nightly build with the given commit hash. See `this page
  <https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/list.html>`_ to see the
  list of all nightly builds.

****************************
Building XGBoost from source
****************************
This page gives instructions on how to build and install XGBoost from scratch on various systems. It consists of two steps:

1. First build the shared library from the C++ codes (``libxgboost.so`` for Linux/OSX and ``xgboost.dll`` for Windows).
   (For R-package installation, please directly refer to `R Package Installation`_.)
2. Then install the language packages (e.g. Python Package).
This page gives instructions on how to build and install XGBoost from scratch on various systems.

.. note:: Use of Git submodules

@@ -49,11 +53,10 @@ to ask questions at `the user forum <https://discuss.xgboost.ai>`_.

* `Building the Shared Library`_

  - `Building on Ubuntu/Debian`_
  - `Building on Linux Distributions`_
  - `Building on OSX`_
  - `Building on Windows`_
  - `Building with GPU support`_
  - `Customized Building`_

* `Python Package Installation`_
* `R Package Installation`_
@@ -71,15 +74,17 @@ Our goal is to build the shared library:
- On Linux/OSX the target library is ``libxgboost.so``
- On Windows the target library is ``xgboost.dll``

The minimal building requirement is
This shared library is used by different language bindings (with some additions depending
on the binding you choose). For building language specific package, see corresponding
sections in this document. The minimal building requirement is

- A recent C++ compiler supporting C++11 (g++-5.0 or higher)
- CMake 3.3 or higher (3.12 for building with CUDA)
- CMake 3.12 or higher.

For a list of CMake options, see ``#-- Options`` in CMakeLists.txt on top of source tree.
For a list of CMake options, see ``#-- Options`` in CMakeLists.txt at the top level of the source tree.

Building on Ubuntu/Debian
=========================
Building on Linux distributions
===============================

On Ubuntu, one builds XGBoost by running CMake:

@@ -90,7 +95,7 @@ On Ubuntu, one builds XGBoost by running CMake:
   mkdir build
   cd build
   cmake ..
   make -j4
   make -j$(nproc)

Building on OSX
===============
@@ -160,32 +165,14 @@ To build with Visual Studio, we will need CMake. Make sure to install a recent v
   mkdir build
   cd build
   cmake .. -G"Visual Studio 14 2015 Win64"
   # for VS15: cmake .. -G"Visual Studio 15 2017" -A x64
   # for VS16: cmake .. -G"Visual Studio 16 2019" -A x64
   cmake --build . --config Release

This specifies an out of source build using the Visual Studio 64 bit generator. (Change the ``-G`` option appropriately if you have a different version of Visual Studio installed.) Open the ``.sln`` file in the build directory and build with Visual Studio.
This specifies an out of source build using the Visual Studio 64 bit generator. (Change the ``-G`` option appropriately if you have a different version of Visual Studio installed.)

After the build process successfully ends, you will find a ``xgboost.dll`` library file inside ``./lib/`` folder.

Compile XGBoost using MinGW
---------------------------
After installing `Git for Windows <https://git-for-windows.github.io/>`_, you should have a shortcut named ``Git Bash``. You should run all subsequent steps in ``Git Bash``.

In MinGW, ``make`` command comes with the name ``mingw32-make``. You can add the following line into the ``.bashrc`` file:

.. code-block:: bash

   alias make='mingw32-make'

(On 64-bit Windows, you should get `MinGW64 <https://sourceforge.net/projects/mingw-w64/>`_ instead.) Make sure
that the path to MinGW is in the system PATH.

To build with MinGW, type:

.. code-block:: bash

   cp make/mingw64.mk config.mk; make -j4

See :ref:`mingw_python` for building XGBoost for Python.

.. _build_gpu_support:

Building with GPU support
@@ -241,56 +228,101 @@ The above cmake configuration run will create an ``xgboost.sln`` solution file i

To speed up compilation, run multiple jobs in parallel by appending option ``-- /MP``.

Customized Building
===================
Makefiles
=========

We recommend the use of CMake for most use cases. See the full range of building options in CMakeLists.txt.

Alternatively, you may use Makefile. The Makefile uses a configuration file ``config.mk``, which lets you modify several compilation flags:
- Whether to enable support for various distributed filesystems such as HDFS and Amazon S3
- Which compiler to use
- And some more

To customize, first copy ``make/config.mk`` to the project root and then modify the copy.
It's only used for creating shorthands for running linters, performing packaging tasks
etc. So the remaining makefiles are legacy.

Python Package Installation
===========================

The Python package is located at ``python-package/``.
There are several ways to install the package:
The Python package is located at ``python-package/``. There are several ways to build and
install the package from source:

1. Install system-wide, which requires root permission:
1. Use Python setuptools directly

   The XGBoost Python package supports most of the setuptools commands, here is a list of tested commands:

   .. code-block:: bash

     cd python-package; sudo python setup.py install
     python setup.py install     # Install the XGBoost to your current Python environment.
     python setup.py build       # Build the Python package.
     python setup.py build_ext   # Build only the C++ core.
     python setup.py sdist       # Create a source distribution
     python setup.py bdist       # Create a binary distribution
     python setup.py bdist_wheel # Create a binary distribution with wheel format

   You will however need Python ``distutils`` module for this to
   work. It is often part of the core Python package or it can be installed using your
   package manager, e.g. in Debian use
   Running ``python setup.py install`` will compile XGBoost using default CMake flags. For
   passing additional compilation options, append the flags to the command. For example, to
   enable CUDA acceleration and NCCL (distributed GPU) support:

   .. code-block:: bash

     python setup.py install --use-cuda --use-nccl

   Please refer to ``setup.py`` for a complete list of available options. Some other options
   used for development are only available for using CMake directly. See next section on
   how to use CMake with setuptools manually.

   You can install the created distribution packages using pip. For example, after running
   ``sdist`` setuptools command, a tar ball similar to ``xgboost-1.0.0.tar.gz`` will be
   created under the ``dist`` directory. Then you can install it by invoking the following
   command under ``dist`` directory:

   .. code-block:: bash

     # under python-package directory
     cd dist
     pip install ./xgboost-1.0.0.tar.gz

   For details about these commands, please refer to the official document of `setuptools
   <https://setuptools.readthedocs.io/en/latest/>`_, or just Google "how to install Python
   package from source". XGBoost Python package follows the general convention. Setuptools
   is usually available with your Python distribution, if not you can install it via system
   command. For example on Debian or Ubuntu:

   .. code-block:: bash

     sudo apt-get install python-setuptools

   .. note:: Re-compiling XGBoost

     If you recompiled XGBoost, then you need to reinstall it again to make the new library take effect.
     For cleaning up the directory after running above commands, ``python setup.py clean`` is
     not sufficient. After copying out the build result, simply running ``git clean -xdf``
     under ``python-package`` is an efficient way to remove generated cache files. If you find
     weird behaviors in Python build or running linter, it might be caused by those cached
     files.

2. Only set the environment variable ``PYTHONPATH`` to tell Python where to find
   the library. For example, assume we cloned ``xgboost`` on the home directory
   ``~``. Then we can add the following line in ``~/.bashrc``.
   This option is **recommended for developers** who change the code frequently. The changes will be immediately reflected once you pulled the code and rebuild the project (no need to call ``setup`` again).
   For using develop command (editable installation), see next section.

   .. code-block::

     python setup.py develop  # Create an editable installation.
     pip install -e .         # Same as above, but carried out by pip.


2. Build C++ core with CMake first

   This is mostly for C++ developers who don't want to go through the hooks in Python
   setuptools. You can build C++ library directly using CMake as described in above
   sections. After compilation, a shared object (or called dynamic linked library, jargon
   depending on your platform) will appear in XGBoost's source tree under ``lib/`` directory.
   On Linux distributions it's ``lib/libxgboost.so``. From there all Python setuptools
   commands will reuse that shared object instead of compiling it again. This is especially
   convenient if you are using the editable installation, where the installed package is
   simply a link to the source tree. We can perform rapid testing during development. Here
   is a simple bash script that does that:

   .. code-block:: bash

     export PYTHONPATH=~/xgboost/python-package

3. Install only for the current user.

   .. code-block:: bash

     cd python-package; python setup.py develop --user
     # Under xgboost source tree.
     mkdir build
     cd build
     cmake ..
     make -j$(nproc)
     cd ../python-package
     pip install -e .  # or equivalently python setup.py develop

.. _mingw_python:

@@ -310,6 +342,7 @@ So you may want to build XGBoost with GCC at your own risk. This presents some
4. Don't use ``-march=native`` gcc flag. Using it causes the Python interpreter to crash if the DLL was actually used.
5. You may need to provide the lib with the runtime libs. If ``mingw32/bin`` is not in ``PATH``, build a wheel (``python setup.py bdist_wheel``), open it with an archiver and put the needed dlls to the directory where ``xgboost.dll`` is situated. Then you can install the wheel with ``pip``.


R Package Installation
======================

@@ -335,8 +368,9 @@ You can install XGBoost from CRAN just like any other R package:
Installing the development version
----------------------------------

Make sure you have installed git and a recent C++ compiler supporting C++11 (e.g., g++-4.8 or higher).
On Windows, Rtools must be installed, and its bin directory has to be added to ``PATH`` during the installation.
Make sure you have installed git and a recent C++ compiler supporting C++11 (see above
sections for requirements of building C++ core). On Windows, Rtools must be installed,
and its bin directory has to be added to ``PATH`` during the installation.

Due to the use of git-submodules, ``devtools::install_github`` can no longer be used to install the latest version of R package.
Thus, one has to run git to check out the code first:
@@ -350,10 +384,11 @@ Thus, one has to run git to check out the code first:
   mkdir build
   cd build
   cmake .. -DR_LIB=ON
   make -j4
   make -j$(nproc)
   make install

If all fails, try `Building the shared library`_ to see whether a problem is specific to R package or not.
If all fails, try `Building the shared library`_ to see whether a problem is specific to R
package or not. Notice that the R package is installed by CMake directly.

.. _r_gpu_support:

@@ -369,7 +404,7 @@ On Linux, starting from the XGBoost directory type:
   mkdir build
   cd build
   cmake .. -DUSE_CUDA=ON -DR_LIB=ON
   make install -j
   make install -j$(nproc)

When default target is used, an R package shared library would be built in the ``build`` area.
The ``install`` target, in addition, assembles the package files with this shared library under ``build/R-package`` and runs ``R CMD INSTALL``.
@@ -407,29 +442,6 @@ Trouble Shooting

   git submodule update && make clean_all && make -j4

2. Compile failed after ``config.mk`` is modified

   Need to clean all first:

   .. code-block:: bash

     make clean_all && make -j4

3. ``Makefile: dmlc-core/make/dmlc.mk: No such file or directory``

   We need to recursively clone the submodule:

   .. code-block:: bash

     git submodule init
     git submodule update

   Alternatively, do another clone

   .. code-block:: bash

     git clone https://github.com/dmlc/xgboost --recursive

Building the Documentation
==========================
@@ -447,5 +459,8 @@ XGBoost uses `Sphinx <https://www.sphinx-doc.org/en/stable/>`_ for documentation
- guzzle_sphinx_theme
- recommonmark
- mock
- sh
- graphviz
- matplotlib

Under ``xgboost/doc`` directory, run ``make <format>`` with ``<format>`` replaced by the format you want. For a list of supported formats, run ``make help`` under the same directory.
99
doc/conf.py
@@ -18,26 +18,40 @@ from urllib.error import HTTPError
from recommonmark.parser import CommonMarkParser
import sys
import re
import os, subprocess
import shlex
import os
import subprocess
import guzzle_sphinx_theme

git_branch = os.getenv('SPHINX_GIT_BRANCH', default=None)
if git_branch is None:
    # If SPHINX_GIT_BRANCH environment variable is not given, run git to determine branch name
    git_branch = [re.sub(r'origin/', '', x.lstrip(' ')) for x in str(git.branch('-r', '--contains', 'HEAD')).rstrip('\n').split('\n')]
    # If SPHINX_GIT_BRANCH environment variable is not given, run git
    # to determine branch name
    git_branch = [
        re.sub(r'origin/', '', x.lstrip(' ')) for x in str(
            git.branch('-r', '--contains', 'HEAD')).rstrip('\n').split('\n')
    ]
git_branch = [x for x in git_branch if 'HEAD' not in x]
print('git_branch = {}'.format(git_branch[0]))
try:
    filename, _ = urllib.request.urlretrieve('https://s3-us-west-2.amazonaws.com/xgboost-docs/{}.tar.bz2'.format(git_branch[0]))
    call('if [ -d tmp ]; then rm -rf tmp; fi; mkdir -p tmp/jvm; cd tmp/jvm; tar xvf {}'.format(filename), shell=True)
    filename, _ = urllib.request.urlretrieve(
        'https://s3-us-west-2.amazonaws.com/xgboost-docs/{}.tar.bz2'.format(
            git_branch[0]))
    call(
        'if [ -d tmp ]; then rm -rf tmp; fi; mkdir -p tmp/jvm; cd tmp/jvm; tar xvf {}'
        .format(filename),
        shell=True)
except HTTPError:
    print('JVM doc not found. Skipping...')
    print('JVM doc not found. Skipping...')
try:
    filename, _ = urllib.request.urlretrieve('https://s3-us-west-2.amazonaws.com/xgboost-docs/doxygen/{}.tar.bz2'.format(git_branch[0]))
    call('mkdir -p tmp/dev; cd tmp/dev; tar xvf {}; mv doc_doxygen/html/* .; rm -rf doc_doxygen'.format(filename), shell=True)
    filename, _ = urllib.request.urlretrieve(
        'https://s3-us-west-2.amazonaws.com/xgboost-docs/doxygen/{}.tar.bz2'.
        format(git_branch[0]))
    call(
        'mkdir -p tmp/dev; cd tmp/dev; tar xvf {}; mv doc_doxygen/html/* .; rm -rf doc_doxygen'
        .format(filename),
        shell=True)
except HTTPError:
    print('C API doc not found. Skipping...')
    print('C API doc not found. Skipping...')

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@@ -48,22 +62,22 @@ sys.path.insert(0, libpath)
sys.path.insert(0, curr_path)

# -- mock out modules
import mock
import mock  # NOQA
MOCK_MODULES = ['scipy', 'scipy.sparse', 'sklearn', 'pandas']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()
    sys.modules[mod_name] = mock.Mock()

# -- General configuration ------------------------------------------------

# General information about the project.
project = u'xgboost'
author = u'%s developers' % project
copyright = u'2019, %s' % author
copyright = u'2020, %s' % author
github_doc_root = 'https://github.com/dmlc/xgboost/tree/master/doc/'

os.environ['XGBOOST_BUILD_DOC'] = '1'
# Version information.
import xgboost
import xgboost  # NOQA
version = xgboost.__version__
release = xgboost.__version__

@@ -99,7 +113,7 @@ source_parsers = {
source_suffix = ['.rst', '.md']

# The encoding of source files.
#source_encoding = 'utf-8-sig'
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'
@@ -115,9 +129,9 @@ autoclass_content = 'both'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@@ -126,27 +140,27 @@ html_extra_path = ['./tmp']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
@@ -191,27 +205,32 @@ latex_documents = [
    author, 'manual'),
]

intersphinx_mapping = {'python': ('https://docs.python.org/3.6', None),
                       'numpy': ('http://docs.scipy.org/doc/numpy/', None),
                       'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
                       'pandas': ('http://pandas-docs.github.io/pandas-docs-travis/', None),
                       'sklearn': ('http://scikit-learn.org/stable', None)}
intersphinx_mapping = {
    'python': ('https://docs.python.org/3.6', None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
    'pandas': ('http://pandas-docs.github.io/pandas-docs-travis/', None),
    'sklearn': ('http://scikit-learn.org/stable', None)
}


# hook for doxygen
def run_doxygen(folder):
  """Run the doxygen make command in the designated folder."""
  try:
    retcode = subprocess.call("cd %s; make doxygen" % folder, shell=True)
    if retcode < 0:
      sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
  except OSError as e:
    sys.stderr.write("doxygen execution failed: %s" % e)
    """Run the doxygen make command in the designated folder."""
    try:
        retcode = subprocess.call("cd %s; make doxygen" % folder, shell=True)
        if retcode < 0:
            sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
    except OSError as e:
        sys.stderr.write("doxygen execution failed: %s" % e)


def generate_doxygen_xml(app):
  """Run the doxygen make commands if we're on the ReadTheDocs server"""
  read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
  if read_the_docs_build:
    run_doxygen('..')
    """Run the doxygen make commands if we're on the ReadTheDocs server"""
    read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
    if read_the_docs_build:
        run_doxygen('..')


def setup(app):
  app.add_stylesheet('custom.css')
    app.add_stylesheet('custom.css')
@@ -27,6 +27,8 @@ Committers are individuals who are granted the write access to the project. A co

The Project Management Committee (PMC) consists of a group of active committers who moderate discussions, manage project releases, and propose new committer/PMC members. Potential candidates are usually proposed via an internal discussion among PMC members, followed by a consensus approval, i.e. at least 3 +1 votes and no vetoes. Any veto must be accompanied by reasoning. PMC members should serve the community by upholding the community practices and guidelines, making XGBoost a better community for everyone. PMC members should strive to only nominate new candidates from outside their own organization.

+The PMC is in charge of the project's `continuous integration (CI) <https://en.wikipedia.org/wiki/Continuous_integration>`_ and testing infrastructure. Currently, we host our own Jenkins server at https://xgboost-ci.net. The PMC shall appoint committer(s) to manage the CI infrastructure. The PMC may accept 3rd-party donations and sponsorships that would defray the cost of the CI infrastructure. See :ref:`donation_policy`.

Reviewers
---------

44 doc/contrib/donate.rst Normal file
@@ -0,0 +1,44 @@
.. _donation_policy:

Donations
=========

.. raw:: html

  <a href="https://opencollective.com/xgboost">Donate to dmlc/xgboost</a>

Motivation
----------
DMLC/XGBoost has grown from a research project incubated in academia to one of the most widely used gradient boosting frameworks in production environments. On one side, with the growth in volume and variety of data in production environments, users have correspondingly growing expectations of XGBoost in terms of functionality, scalability and robustness. On the other side, as an open source project which develops at a fast pace, XGBoost has been receiving contributions from many individuals and organizations around the world. Given the high expectations from users and the increasing channels of contribution to the project, delivering high quality software presents a challenge to the project maintainers.

A robust and efficient **continuous integration (CI)** infrastructure is one of the most critical solutions to address the above challenge. A CI service will monitor an open-source repository and run a suite of integration tests for every incoming contribution. This way, the CI ensures that every proposed change in the codebase is compatible with existing functionalities. Furthermore, XGBoost can enable more thorough tests with a powerful CI infrastructure to cover cases which are closer to the production environment.

There are several CI services available free to open source projects, such as Travis CI and AppVeyor. The XGBoost project already utilizes Travis and AppVeyor. However, the XGBoost project has needs that these free services do not adequately address. In particular, the limited usage quota of resources such as CPU and memory leaves XGBoost developers unable to bring in "too-intensive" tests. In addition, they do not offer test machines with GPUs for testing the XGBoost GPU code base, which has been attracting more and more interest across many organizations. Consequently, the XGBoost project self-hosts a cloud server with Jenkins software installed: https://xgboost-ci.net/.

The self-hosted Jenkins CI server has recurring operating expenses. It utilizes a leading cloud provider (AWS) to accommodate variable workload. The master node serving the web interface is available 24/7, to accommodate contributions from people around the globe. In addition, the master node launches slave nodes on demand, to run the test suite on incoming contributions. To save cost, the slave nodes are terminated when they are no longer needed.

To help defray the hosting cost, the XGBoost project seeks donations from third parties.

Donations and Sponsorships
--------------------------
Donors may choose to make one-time donations or recurring donations on a monthly or yearly basis. Donors who commit to the Sponsor tier will have their logo displayed on the front page of the XGBoost project.

Fiscal host: Open Source Collective 501(c)(6)
---------------------------------------------
The Project Management Committee (PMC) of the XGBoost project appointed `Open Source Collective <https://opencollective.com/opensource>`_ as their **fiscal host**. The platform is a 501(c)(6) registered entity and will manage the funds on behalf of the PMC so that PMC members will not have to manage the funds directly. The platform currently hosts several well-known JavaScript frameworks such as Babel, Vue, and Webpack.

All expenses incurred for hosting CI will be submitted to the fiscal host with receipts. Only the expenses in the following categories will be approved for reimbursement:

* Cloud expenses for the Jenkins CI server (https://xgboost-ci.net)
* Cost of domain https://xgboost-ci.net
* Meetup.com account for XGBoost project
* Hosting cost of the User Forum (https://discuss.xgboost.ai)

Administration of Jenkins CI server
-----------------------------------
The PMC shall appoint committer(s) to administer the Jenkins CI server on their behalf. The current administrators are as follows:

* Primary administrator: `Hyunsu Cho <https://github.com/hcho3>`_
* Secondary administrator: `Jiaming Yuan <https://github.com/trivialfis>`_

The administrators shall make a good-faith effort to keep the CI expenses under control. The expenses shall not exceed the available funds. The administrators should post regular updates on CI expenses.

@@ -21,6 +21,7 @@ Here are guidelines for contributing to various aspect of the XGBoost project:
  :maxdepth: 2

  Community Guideline <community>
+  donate
  coding_guide
  unit_tests
  Docs and Examples <docs>

@@ -26,7 +26,7 @@ Algorithms
+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| tree_method           | Description                                                                                                                                                           |
+=======================+=======================================================================================================================================================================+
-| gpu_hist              | Equivalent to the XGBoost fast histogram algorithm. Much faster and uses considerably less memory. NOTE: Will run very slowly on GPUs older than Pascal architecture. |
+| gpu_hist              | Equivalent to the XGBoost fast histogram algorithm. Much faster and uses considerably less memory. NOTE: May run very slowly on GPUs older than Pascal architecture.  |
+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+

Supported parameters

@@ -40,6 +40,8 @@ Supported parameters
+================================+==============+
| ``subsample``                  | |tick|       |
+--------------------------------+--------------+
+| ``sampling_method``            | |tick|       |
++--------------------------------+--------------+
| ``colsample_bytree``           | |tick|       |
+--------------------------------+--------------+
| ``colsample_bylevel``          | |tick|       |

@@ -50,8 +52,6 @@ Supported parameters
+--------------------------------+--------------+
| ``gpu_id``                     | |tick|       |
+--------------------------------+--------------+
-| ``n_gpus`` (deprecated)        | |tick|       |
-+--------------------------------+--------------+
| ``predictor``                  | |tick|       |
+--------------------------------+--------------+
| ``grow_policy``                | |tick|       |

@@ -85,10 +85,6 @@ The GPU algorithms currently work with CLI, Python and R packages. See :doc:`/bu
  XGBRegressor(tree_method='gpu_hist', gpu_id=0)


-Single Node Multi-GPU
-=====================
-.. note:: Single node multi-GPU training with `n_gpus` parameter is deprecated after 0.90. Please use distributed GPU training with one process per GPU.
-
Multi-node Multi-GPU Training
=============================
XGBoost supports fully distributed GPU training using `Dask <https://dask.org/>`_. For

@@ -128,16 +124,17 @@ Most of the objective functions implemented in XGBoost can be run on GPU. Follo
+--------------------+-------------+
| survival:cox       | |cross|     |
+--------------------+-------------+
-| rank:pairwise      | |cross|     |
+| rank:pairwise      | |tick|      |
+--------------------+-------------+
-| rank:ndcg          | |cross|     |
+| rank:ndcg          | |tick|      |
+--------------------+-------------+
-| rank:map           | |cross|     |
+| rank:map           | |tick|      |
+--------------------+-------------+

Objectives will run on GPU if the GPU updater (``gpu_hist``) is used, otherwise they will run on CPU by
default. For unsupported objectives XGBoost will fall back to using CPU implementation by
-default.
+default. Note that when using GPU ranking objective, the result is not deterministic due
+to the non-associative aspect of floating point summation.

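The GPU ranking objectives are used the same way as on CPU. As a minimal sketch (the
synthetic data and group sizes below are made up purely for illustration), training a
pairwise ranking model with the ``gpu_hist`` tree method looks like:

.. code-block:: python

  import numpy as np
  import xgboost as xgb

  # Synthetic learning-to-rank data: 100 documents with 10 features,
  # split into two query groups of 60 and 40 documents.
  X = np.random.rand(100, 10)
  y = np.random.randint(0, 5, size=100)  # graded relevance labels
  dtrain = xgb.DMatrix(X, label=y)
  dtrain.set_group([60, 40])             # number of documents per query group

  params = {
      'objective': 'rank:pairwise',  # supported on GPU, per the table above
      'tree_method': 'gpu_hist',
      'eval_metric': 'ndcg',
  }
  bst = xgb.train(params, dtrain, num_boost_round=10)
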
Metric functions
===================

@@ -160,13 +157,13 @@ Following table shows current support status for evaluation metrics on the GPU.
+-----------------+-------------+
| mlogloss        | |tick|      |
+-----------------+-------------+
-| auc             | |cross|     |
+| auc             | |tick|      |
+-----------------+-------------+
| aucpr           | |cross|     |
+-----------------+-------------+
-| ndcg            | |cross|     |
+| ndcg            | |tick|      |
+-----------------+-------------+
-| map             | |cross|     |
+| map             | |tick|      |
+-----------------+-------------+
| poisson-nloglik | |tick|      |
+-----------------+-------------+

@@ -188,21 +185,18 @@ You can run benchmarks on synthetic data for binary classification:

.. code-block:: bash

-  python tests/benchmark/benchmark.py
+  python tests/benchmark/benchmark_tree.py --tree_method=gpu_hist
+  python tests/benchmark/benchmark_tree.py --tree_method=hist

-Training time time on 1,000,000 rows x 50 columns with 500 boosting iterations and 0.25/0.75 test/train split on i7-6700K CPU @ 4.00GHz and Pascal Titan X yields the following results:
+Training time on 1,000,000 rows x 50 columns of random data with 500 boosting iterations and 0.25/0.75 test/train split with AMD Ryzen 7 2700 8 core @3.20GHz and Nvidia 1080ti yields the following results:

+--------------+----------+
| tree_method  | Time (s) |
+==============+==========+
-| gpu_hist     | 13.87    |
+| gpu_hist     | 12.57    |
+--------------+----------+
-| hist         | 63.55    |
+| hist         | 36.01    |
+--------------+----------+
| exact        | 1082.20  |
+--------------+----------+

See `GPU Accelerated XGBoost <https://xgboost.ai/2016/12/14/GPU-accelerated-xgboost.html>`_ and `Updates to the XGBoost GPU algorithms <https://xgboost.ai/2018/07/04/gpu-xgboost-update.html>`_ for additional performance benchmarks of the ``gpu_hist`` tree method.

Memory usage
============

@@ -220,6 +214,7 @@ Working memory is allocated inside the algorithm proportional to the number of r

The quantile finding algorithm also uses some amount of working device memory. It is able to operate in batches, but is not currently well optimised for sparse data.

+If you are getting out-of-memory errors on a big dataset, try the `external memory version <../tutorials/external_memory.html>`_.

Developer notes
===============

@@ -241,8 +236,10 @@ Many thanks to the following contributors (alphabetical order):
* Jonathan C. McKinney
* Matthew Jones
* Philip Cho
+* Rong Ou
* Rory Mitchell
* Shankara Rao Thejaswi Nanditale
+* Sriram Chandramouli
* Vinay Deshpande

Please report bugs to the XGBoost issues list: https://github.com/dmlc/xgboost/issues. For general questions please visit our user forum: https://discuss.xgboost.ai/.

@@ -8,15 +8,143 @@ XGBoost JVM Package
    <img alt="Build Status" src="https://travis-ci.org/dmlc/xgboost.svg?branch=master">
  </a>
  <a href="https://github.com/dmlc/xgboost/blob/master/LICENSE">
-   <img alt="GitHub license" src="http://dmlc.github.io/img/apache2.svg">
+   <img alt="GitHub license" src="https://dmlc.github.io/img/apache2.svg">
  </a>

You have found the XGBoost JVM Package!

.. _install_jvm_packages:

************
Installation
************

.. contents::
  :local:
  :backlinks: none

Installation from Maven repository
==================================

Access release version
----------------------
You can use XGBoost4J in your Java/Scala application by adding XGBoost4J as a dependency:
.. code-block:: xml
  :caption: Maven

  <properties>
    ...
    <!-- Specify Scala version in package name -->
    <scala.binary.version>2.12</scala.binary.version>
  </properties>

  <dependencies>
    ...
    <dependency>
      <groupId>ml.dmlc</groupId>
      <artifactId>xgboost4j_${scala.binary.version}</artifactId>
      <version>latest_version_num</version>
    </dependency>
    <dependency>
      <groupId>ml.dmlc</groupId>
      <artifactId>xgboost4j-spark_${scala.binary.version}</artifactId>
      <version>latest_version_num</version>
    </dependency>
  </dependencies>

.. code-block:: scala
  :caption: sbt

  libraryDependencies ++= Seq(
    "ml.dmlc" %% "xgboost4j" % "latest_version_num",
    "ml.dmlc" %% "xgboost4j-spark" % "latest_version_num"
  )

This will check out the latest stable version from Maven Central.

For the latest release version number, please check `here <https://github.com/dmlc/xgboost/releases>`_.

.. note:: Using Maven repository hosted by the XGBoost project

  There may be some delay until a new release becomes available to Maven Central. If you would like to access the latest release immediately, add the Maven repository hosted by the XGBoost project:

  .. code-block:: xml
    :caption: Maven

    <repository>
      <id>XGBoost4J Release Repo</id>
      <name>XGBoost4J Release Repo</name>
      <url>https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/release/</url>
    </repository>

  .. code-block:: scala
    :caption: sbt

    resolvers += "XGBoost4J Release Repo" at "https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/release/"

Access SNAPSHOT version
-----------------------

First add the following Maven repository hosted by the XGBoost project:

.. code-block:: xml
  :caption: Maven

  <repository>
    <id>XGBoost4J Snapshot Repo</id>
    <name>XGBoost4J Snapshot Repo</name>
    <url>https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/snapshot/</url>
  </repository>

.. code-block:: scala
  :caption: sbt

  resolvers += "XGBoost4J Snapshot Repo" at "https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/snapshot/"

Then add XGBoost4J as a dependency:

.. code-block:: xml
  :caption: Maven

  <properties>
    ...
    <!-- Specify Scala version in package name -->
    <scala.binary.version>2.12</scala.binary.version>
  </properties>

  <dependencies>
    ...
    <dependency>
      <groupId>ml.dmlc</groupId>
      <artifactId>xgboost4j_${scala.binary.version}</artifactId>
      <version>latest_version_num-SNAPSHOT</version>
    </dependency>
    <dependency>
      <groupId>ml.dmlc</groupId>
      <artifactId>xgboost4j-spark_${scala.binary.version}</artifactId>
      <version>latest_version_num-SNAPSHOT</version>
    </dependency>
  </dependencies>

.. code-block:: scala
  :caption: sbt

  libraryDependencies ++= Seq(
    "ml.dmlc" %% "xgboost4j" % "latest_version_num-SNAPSHOT",
    "ml.dmlc" %% "xgboost4j-spark" % "latest_version_num-SNAPSHOT"
  )

Look up the ``version`` field in `pom.xml <https://github.com/dmlc/xgboost/blob/master/jvm-packages/pom.xml>`_ to get the correct version number.

The SNAPSHOT JARs are hosted by the XGBoost project. Every commit in the ``master`` branch will automatically trigger generation of a new SNAPSHOT JAR. You can control how often Maven should upgrade your SNAPSHOT installation by specifying ``updatePolicy``. See `here <http://maven.apache.org/pom.html#Repositories>`_ for details.

You can browse the file listing of the Maven repository at https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/list.html.

.. note:: Windows not supported by published JARs

  The published JARs from Maven Central and GitHub currently only support Linux and macOS. Windows users should consider building XGBoost4J / XGBoost4J-Spark from source. Alternatively, check out pre-built JARs from `criteo-forks/xgboost-jars <https://github.com/criteo-forks/xgboost-jars>`_.

Installation from source
========================

@@ -64,73 +192,6 @@ If you want to use XGBoost4J-Spark, replace ``xgboost4j`` with ``xgboost4j-spark

Also, make sure to install Spark directly from `Apache website <https://spark.apache.org/>`_. **Upstream XGBoost is not guaranteed to work with third-party distributions of Spark, such as Cloudera Spark.** Consult appropriate third parties to obtain their distribution of XGBoost.

-Installation from maven repo
-============================
-
-Access release version
-----------------------
-
-.. code-block:: xml
-  :caption: maven
-
-  <dependency>
-    <groupId>ml.dmlc</groupId>
-    <artifactId>xgboost4j</artifactId>
-    <version>latest_version_num</version>
-  </dependency>
-
-.. code-block:: scala
-  :caption: sbt
-
-  "ml.dmlc" % "xgboost4j" % "latest_version_num"
-
-This will checkout the latest stable version from the Maven Central.
-
-For the latest release version number, please check `here <https://github.com/dmlc/xgboost/releases>`_.
-
-if you want to use XGBoost4J-Spark, replace ``xgboost4j`` with ``xgboost4j-spark``.
-
-Access SNAPSHOT version
------------------------
-
-You need to add GitHub as repo:
-
-.. code-block:: xml
-  :caption: maven
-
-  <repository>
-    <id>GitHub Repo</id>
-    <name>GitHub Repo</name>
-    <url>https://raw.githubusercontent.com/CodingCat/xgboost/maven-repo/</url>
-  </repository>
-
-.. code-block:: scala
-  :caption: sbt
-
-  resolvers += "GitHub Repo" at "https://raw.githubusercontent.com/CodingCat/xgboost/maven-repo/"
-
-Then add dependency as following:
-
-.. code-block:: xml
-  :caption: maven
-
-  <dependency>
-    <groupId>ml.dmlc</groupId>
-    <artifactId>xgboost4j</artifactId>
-    <version>latest_version_num</version>
-  </dependency>
-
-.. code-block:: scala
-  :caption: sbt
-
-  "ml.dmlc" % "xgboost4j" % "latest_version_num"
-
-For the latest release version number, please check `here <https://github.com/CodingCat/xgboost/tree/maven-repo/ml/dmlc/xgboost4j>`_.
-
-.. note:: Windows not supported by published JARs
-
-  The published JARs from the Maven Central and GitHub currently only supports Linux and MacOS. Windows users should consider building XGBoost4J / XGBoost4J-Spark from the source. Alternatively, checkout pre-built JARs from `criteo-forks/xgboost-jars <https://github.com/criteo-forks/xgboost-jars>`_.

Enabling OpenMP for Mac OS
--------------------------
If you are on Mac OS and using a compiler that supports OpenMP, you need to go to the file ``xgboost/jvm-packages/create_jni.py`` and comment out the line

@@ -27,39 +27,7 @@ Build an ML Application with XGBoost4J-Spark
Refer to XGBoost4J-Spark Dependency
===================================

-Before we go into the tour of how to use XGBoost4J-Spark, we would bring a brief introduction about how to build a machine learning application with XGBoost4J-Spark. The first thing you need to do is to refer to the dependency in Maven Central.
-
-You can add the following dependency in your ``pom.xml``.
-
-.. code-block:: xml
-
-  <dependency>
-    <groupId>ml.dmlc</groupId>
-    <artifactId>xgboost4j-spark</artifactId>
-    <version>latest_version_num</version>
-  </dependency>
-
-For the latest release version number, please check `here <https://github.com/dmlc/xgboost/releases>`_.
-
-We also publish some functionalities which would be included in the coming release in the form of snapshot version. To access these functionalities, you can add dependency to the snapshot artifacts. We publish snapshot version in github-based repo, so you can add the following repo in ``pom.xml``:
-
-.. code-block:: xml
-
-  <repository>
-    <id>XGBoost4J-Spark Snapshot Repo</id>
-    <name>XGBoost4J-Spark Snapshot Repo</name>
-    <url>https://raw.githubusercontent.com/CodingCat/xgboost/maven-repo/</url>
-  </repository>
-
-and then refer to the snapshot dependency by adding:
-
-.. code-block:: xml
-
-  <dependency>
-    <groupId>ml.dmlc</groupId>
-    <artifactId>xgboost4j-spark</artifactId>
-    <version>next_version_num-SNAPSHOT</version>
-  </dependency>
+Before we go into the tour of how to use XGBoost4J-Spark, you should first consult :ref:`Installation from Maven repository <install_jvm_packages>` in order to add XGBoost4J-Spark as a dependency for your project. We provide both stable releases and snapshots.

.. note:: XGBoost4J-Spark requires Apache Spark 2.4+

@@ -195,12 +195,22 @@
  "properties": {
    "version": {
      "type": "array",
-     "const": [
-       1,
-       0,
-       0
-     ],
-     "additionalItems": false
+     "items": [
+       {
+         "type": "number",
+         "const": 1
+       },
+       {
+         "type": "number",
+         "minimum": 0
+       },
+       {
+         "type": "number",
+         "minimum": 0
+       }
+     ],
+     "minItems": 3,
+     "maxItems": 3
    },
    "learner": {
      "type": "object",
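The practical effect of this schema change is that any ``1.x.y`` version triple now
validates, rather than only the exact triple ``[1, 0, 0]``. A quick sanity check (a
sketch using the third-party ``jsonschema`` package, which is not part of XGBoost):

.. code-block:: python

  from jsonschema import Draft7Validator

  # The revised "version" fragment from the schema above.
  version_schema = {
      "type": "array",
      "items": [
          {"type": "number", "const": 1},
          {"type": "number", "minimum": 0},
          {"type": "number", "minimum": 0},
      ],
      "minItems": 3,
      "maxItems": 3,
  }

  v = Draft7Validator(version_schema)
  v.validate([1, 2, 0])    # accepted: any 1.x.y version
  v.validate([1, 0, 0])    # still accepted
  # v.validate([2, 0, 0])  # would raise a ValidationError
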
@@ -23,10 +23,6 @@ General Parameters

- Which booster to use. Can be ``gbtree``, ``gblinear`` or ``dart``; ``gbtree`` and ``dart`` use tree based models while ``gblinear`` uses linear functions.

-* ``silent`` [default=0] [Deprecated]
-
-  - Deprecated. Please use ``verbosity`` instead.
-
* ``verbosity`` [default=1]

  - Verbosity of printing messages. Valid values are 0 (silent), 1 (warning), 2 (info), 3

@@ -34,7 +30,7 @@ General Parameters
  is displayed as warning message. If there's unexpected behaviour, please try to
  increase value of verbosity.

-* ``validate_parameters`` [default to false, except for Python ``train`` function]
+* ``validate_parameters`` [default to false, except for Python and R interface]

  - When set to True, XGBoost will perform validation of input parameters to check whether
    a parameter is used or not. The feature is still experimental. It's expected to have

|
||||
- Subsample ratio of the training instances. Setting it to 0.5 means that XGBoost would randomly sample half of the training data prior to growing trees. and this will prevent overfitting. Subsampling will occur once in every boosting iteration.
|
||||
- range: (0,1]
|
||||
|
||||
* ``sampling_method`` [default= ``uniform``]
|
||||
|
||||
- The method to use to sample the training instances.
|
||||
- ``uniform``: each training instance has an equal probability of being selected. Typically set
|
||||
``subsample`` >= 0.5 for good results.
|
||||
- ``gradient_based``: the selection probability for each training instance is proportional to the
|
||||
*regularized absolute value* of gradients (more specifically, :math:`\sqrt{g^2+\lambda h^2}`).
|
||||
``subsample`` may be set to as low as 0.1 without loss of model accuracy. Note that this
|
||||
sampling method is only supported when ``tree_method`` is set to ``gpu_hist``; other tree
|
||||
methods only support ``uniform`` sampling.
|
||||
|
||||
* ``colsample_bytree``, ``colsample_bylevel``, ``colsample_bynode`` [default=1]
|
||||
|
||||
- This is a family of parameters for subsampling of columns.
|
||||
@@ -150,7 +157,6 @@ Parameters for Tree Booster
|
||||
- A comma separated string defining the sequence of tree updaters to run, providing a modular way to construct and to modify the trees. This is an advanced parameter that is usually set automatically, depending on some other parameters. However, it could be also set explicitly by a user. The following updaters exist:
|
||||
|
||||
- ``grow_colmaker``: non-distributed column-based construction of trees.
|
||||
- ``distcol``: distributed tree construction with column-based data splitting mode.
|
||||
- ``grow_histmaker``: distributed tree construction with row-based data splitting based on global proposal of histogram counting.
|
||||
- ``grow_local_histmaker``: based on local histogram counting.
|
||||
- ``grow_skmaker``: uses the approximate sketching algorithm.
|
||||
@@ -219,6 +225,20 @@ Parameters for Tree Booster
|
||||
list is a group of indices of features that are allowed to interact with each other.
|
||||
See tutorial for more information
|
||||
|
||||
Additional parameters for `gpu_hist` tree method
|
||||
================================================
|
||||
|
||||
* ``single_precision_histogram``, [default=``false``]
|
||||
|
||||
- Use single precision to build histograms. See document for GPU support for more details.
|
||||
|
||||
* ``deterministic_histogram``, [default=``true``]
|
||||
|
||||
- Build histogram on GPU deterministically. Histogram building is not deterministic due
|
||||
to the non-associative aspect of floating point summation. We employ a pre-rounding
|
||||
routine to mitigate the issue, which may lead to slightly lower accuracy. Set to
|
||||
``false`` to disable it.
|
||||
|
||||
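Taken together, a hypothetical configuration exercising these ``gpu_hist``-specific
parameters along with gradient-based sampling might look as follows (a sketch only; the
training data is assumed to be loaded elsewhere):

.. code-block:: python

  import xgboost as xgb

  # dtrain is assumed to be an existing xgb.DMatrix
  params = {
      'tree_method': 'gpu_hist',
      'sampling_method': 'gradient_based',  # only valid with gpu_hist
      'subsample': 0.1,                     # can be this low with gradient_based
      'single_precision_histogram': False,  # keep double precision histograms
      'deterministic_histogram': True,      # default; pre-rounding for determinism
  }
  bst = xgb.train(params, dtrain, num_boost_round=100)
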
Additional parameters for Dart Booster (``booster=dart``)
=========================================================

@@ -331,6 +351,9 @@ Specify the learning task and the corresponding learning objective. The objectiv

- ``survival:cox``: Cox regression for right censored survival time data (negative values are considered right censored).
  Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function ``h(t) = h0(t) * HR``).
+- ``survival:aft``: Accelerated failure time model for censored survival time data.
+  See :doc:`/tutorials/aft_survival_analysis` for details.
+- ``aft_loss_distribution``: Probability Density Function used by ``survival:aft`` and ``aft-nloglik`` metric.
- ``multi:softmax``: set XGBoost to do multiclass classification using the softmax objective, you also need to set num_class(number of classes)
- ``multi:softprob``: same as softmax, but output a vector of ``ndata * nclass``, which can be further reshaped to ``ndata * nclass`` matrix. The result contains predicted probability of each data point belonging to each class.
- ``rank:pairwise``: Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized

@@ -369,6 +392,8 @@ Specify the learning task and the corresponding learning objective. The objectiv
- ``cox-nloglik``: negative partial log-likelihood for Cox proportional hazards regression
- ``gamma-deviance``: residual deviance for gamma regression
- ``tweedie-nloglik``: negative log-likelihood for Tweedie regression (at a specified value of the ``tweedie_variance_power`` parameter)
+- ``aft-nloglik``: Negative log likelihood of Accelerated Failure Time model.
+  See :doc:`/tutorials/aft_survival_analysis` for details.

* ``seed`` [default=0]

@@ -14,6 +14,9 @@ Core Data Structure
  :members:
  :show-inheritance:

+.. autoclass:: xgboost.DeviceQuantileDMatrix
+  :show-inheritance:
+
.. autoclass:: xgboost.Booster
  :members:
  :show-inheritance:

168 doc/tutorials/aft_survival_analysis.rst Normal file
@@ -0,0 +1,168 @@
###############################################
Survival Analysis with Accelerated Failure Time
###############################################

.. contents::
  :local:
  :backlinks: none

**************************
What is survival analysis?
**************************

**Survival analysis (regression)** models **time to an event of interest**. Survival analysis is a special kind of regression and differs from the conventional regression task as follows:

* The label is always positive, since you cannot wait a negative amount of time until the event occurs.
* The label may not be fully known, or **censored**, because "it takes time to measure time."

The second bullet point is crucial and we should dwell on it more. As you may have guessed from the name, one of the earliest applications of survival analysis is to model mortality of a given population. Let's take `NCCTG Lung Cancer Dataset <https://stat.ethz.ch/R-manual/R-devel/library/survival/html/lung.html>`_ as an example. The first 8 columns represent features and the last column, Time to death, represents the label.

==== === === ======= ======== ========= ======== ======= ========================
Inst Age Sex ph.ecog ph.karno pat.karno meal.cal wt.loss **Time to death (days)**
==== === === ======= ======== ========= ======== ======= ========================
3    74  1   1       90       100       1175     N/A     306
3    68  1   0       90       90        1225     15      455
3    56  1   0       90       90        N/A      15      :math:`[1010, +\infty)`
5    57  1   1       90       60        1150     11      210
1    60  1   0       100      90        N/A      0       883
12   74  1   1       50       80        513      0       :math:`[1022, +\infty)`
7    68  2   2       70       60        384      10      310
==== === === ======= ======== ========= ======== ======= ========================

Take a close look at the label for the third patient. **His label is a range, not a single number.** The third patient's label is said to be **censored**, because for some reason the experimenters could not get a complete measurement for that label. One possible scenario: the patient survived the first 1010 days and walked out of the clinic on the 1011th day, so his death was not directly observed. Another possibility: The experiment was cut short (since you cannot run it forever) before his death could be observed. In any case, his label is :math:`[1010, +\infty)`, meaning his time to death can be any number that's higher than 1010, e.g. 2000, 3000, or 10000.

There are four kinds of censoring:

* **Uncensored**: the label is not censored and given as a single number.
* **Right-censored**: the label is of form :math:`[a, +\infty)`, where :math:`a` is the lower bound.
* **Left-censored**: the label is of form :math:`(-\infty, b]`, where :math:`b` is the upper bound.
* **Interval-censored**: the label is of form :math:`[a, b]`, where :math:`a` and :math:`b` are the lower and upper bounds, respectively.

Right-censoring is the most commonly used.

******************************
Accelerated Failure Time model
******************************
**Accelerated Failure Time (AFT)** model is one of the most commonly used models in survival analysis. The model is of the following form:

.. math::

  \ln{Y} = \langle \mathbf{w}, \mathbf{x} \rangle + \sigma Z

where

* :math:`\mathbf{x}` is a vector in :math:`\mathbb{R}^d` representing the features.
* :math:`\mathbf{w}` is a vector consisting of :math:`d` coefficients, each corresponding to a feature.
* :math:`\langle \cdot, \cdot \rangle` is the usual dot product in :math:`\mathbb{R}^d`.
* :math:`\ln{(\cdot)}` is the natural logarithm.
* :math:`Y` and :math:`Z` are random variables.

  - :math:`Y` is the output label.
  - :math:`Z` is a random variable of a known probability distribution. Common choices are the normal distribution, the logistic distribution, and the extreme distribution. Intuitively, :math:`Z` represents the "noise" that pulls the prediction :math:`\langle \mathbf{w}, \mathbf{x} \rangle` away from the true log label :math:`\ln{Y}`.

* :math:`\sigma` is a parameter that scales the size of :math:`Z`.

Note that this model is a generalized form of a linear regression model :math:`Y = \langle \mathbf{w}, \mathbf{x} \rangle`. In order to make AFT work with gradient boosting, we revise the model as follows:

.. math::

  \ln{Y} = \mathcal{T}(\mathbf{x}) + \sigma Z

where :math:`\mathcal{T}(\mathbf{x})` represents the output from a decision tree ensemble, given input :math:`\mathbf{x}`. Since :math:`Z` is a random variable, we have a likelihood defined for the expression :math:`\ln{Y} = \mathcal{T}(\mathbf{x}) + \sigma Z`. So the goal for XGBoost is to maximize the (log) likelihood by fitting a good tree ensemble :math:`\mathcal{T}(\mathbf{x})`.

**********
How to use
**********
The first step is to express the labels in the form of a range, so that **every data point has two numbers associated with it, namely the lower and upper bounds for the label.** For uncensored labels, use a degenerate interval of form :math:`[a, a]`.

.. |tick| unicode:: U+2714
.. |cross| unicode:: U+2718

================= ==================== =================== ===================
Censoring type    Interval form        Lower bound finite? Upper bound finite?
================= ==================== =================== ===================
Uncensored        :math:`[a, a]`       |tick|              |tick|
Right-censored    :math:`[a, +\infty)` |tick|              |cross|
Left-censored     :math:`(-\infty, b]` |cross|             |tick|
Interval-censored :math:`[a, b]`       |tick|              |tick|
================= ==================== =================== ===================

Collect the lower bound numbers in one array (let's call it ``y_lower_bound``) and the upper bound numbers in another array (call it ``y_upper_bound``). The ranged labels are associated with a data matrix object via calls to :meth:`xgboost.DMatrix.set_float_info`:

.. code-block:: python
  :caption: Python

  import numpy as np
  import xgboost as xgb

  # 4-by-2 Data matrix
  X = np.array([[1, -1], [-1, 1], [0, 1], [1, 0]])
  dtrain = xgb.DMatrix(X)

  # Associate ranged labels with the data matrix.
  # This example shows each kind of censored labels.
  #                         uncensored    right     left  interval
  y_lower_bound = np.array([       2.0,      3.0, -np.inf,     4.0])
  y_upper_bound = np.array([       2.0,  +np.inf,     4.0,     5.0])
  dtrain.set_float_info('label_lower_bound', y_lower_bound)
  dtrain.set_float_info('label_upper_bound', y_upper_bound)

.. code-block:: r
  :caption: R

  library(xgboost)

  # 4-by-2 Data matrix
  X <- matrix(c(1., -1., -1., 1., 0., 1., 1., 0.),
              nrow=4, ncol=2, byrow=TRUE)
  dtrain <- xgb.DMatrix(X)

  # Associate ranged labels with the data matrix.
  # This example shows each kind of censored labels.
  #                   uncensored  right    left  interval
  y_lower_bound <- c(         2.,    3.,   -Inf,       4.)
  y_upper_bound <- c(         2.,  +Inf,     4.,       5.)
  setinfo(dtrain, 'label_lower_bound', y_lower_bound)
  setinfo(dtrain, 'label_upper_bound', y_upper_bound)

Now we are ready to invoke the training API:

.. code-block:: python
  :caption: Python

  params = {'objective': 'survival:aft',
            'eval_metric': 'aft-nloglik',
            'aft_loss_distribution': 'normal',
            'aft_loss_distribution_scale': 1.20,
            'tree_method': 'hist', 'learning_rate': 0.05, 'max_depth': 2}
  bst = xgb.train(params, dtrain, num_boost_round=5,
                  evals=[(dtrain, 'train'), (dvalid, 'valid')])

.. code-block:: r
  :caption: R

  params <- list(objective='survival:aft',
                 eval_metric='aft-nloglik',
                 aft_loss_distribution='normal',
                 aft_loss_distribution_scale=1.20,
                 tree_method='hist',
                 learning_rate=0.05,
                 max_depth=2)
  watchlist <- list(train = dtrain)
  bst <- xgb.train(params, dtrain, nrounds=5, watchlist)

We set ``objective`` parameter to ``survival:aft`` and ``eval_metric`` to ``aft-nloglik``, so that the log likelihood for the AFT model would be maximized. (XGBoost will actually minimize the negative log likelihood, hence the name ``aft-nloglik``.)

The parameter ``aft_loss_distribution`` corresponds to the distribution of the :math:`Z` term in the AFT model, and ``aft_loss_distribution_scale`` corresponds to the scaling factor :math:`\sigma`.

Currently, you can choose from three probability distributions for ``aft_loss_distribution``:

========================= ===========================================
``aft_loss_distribution`` Probability Density Function (PDF)
========================= ===========================================
``normal``                :math:`\dfrac{\exp{(-z^2/2)}}{\sqrt{2\pi}}`
``logistic``              :math:`\dfrac{e^z}{(1+e^z)^2}`
``extreme``               :math:`e^z e^{-\exp{z}}`
========================= ===========================================

Note that it is not yet possible to set the ranged label using the scikit-learn interface (e.g. :class:`xgboost.XGBRegressor`). For now, you should use :class:`xgboost.train` with :class:`xgboost.DMatrix`.

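Once trained, predictions are generated as usual; with ``survival:aft`` the returned
values are on the original time scale, i.e. predicted survival times. A minimal sketch,
reusing the ``bst`` and ``dtrain`` objects from the snippets above:

.. code-block:: python

  # Predicted survival time, in the same unit as the labels (e.g. days)
  pred = bst.predict(dtrain)
  print(pred)
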
@@ -14,7 +14,7 @@ concepts should be readily applicable to other language bindings.

* The customized functions defined here are only applicable to single node training.
  Distributed environment requires syncing with ``xgboost.rabit``, the interface is
  subject to change hence beyond the scope of this tutorial.
-* We also plan to re-design the interface for multi-classes objective in the future.
+* We also plan to improve the interface for multi-classes objective in the future.

In the following sections, we will provide a step by step walk through of implementing
the ``Squared Log Error (SLE)`` objective function:
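As a taste of what that walkthrough arrives at, here is a compact sketch of the SLE
objective in the Python interface. The gradient and hessian follow from differentiating
:math:`\frac{1}{2}(\log(pred + 1) - \log(label + 1))^2` with respect to the prediction;
the clipping guard value is illustrative:

.. code-block:: python

  import numpy as np
  import xgboost as xgb

  def squared_log(predt: np.ndarray, dtrain: xgb.DMatrix):
      '''Squared Log Error: 1/2 * (log1p(pred) - log1p(label))^2.
      Returns the element-wise gradient and hessian w.r.t. the prediction.'''
      y = dtrain.get_label()
      predt[predt < -1] = -1 + 1e-6  # keep log1p well defined
      grad = (np.log1p(predt) - np.log1p(y)) / (predt + 1)
      hess = (-np.log1p(predt) + np.log1p(y) + 1) / np.power(predt + 1, 2)
      return grad, hess

  # Plugged into training via the `obj` argument (dtrain assumed to exist):
  # bst = xgb.train({'tree_method': 'hist'}, dtrain,
  #                 num_boost_round=10, obj=squared_log)
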
@@ -136,3 +136,12 @@ Notice that the parameter ``disable_default_eval_metric`` is used to suppress th
in XGBoost.

For fully reproducible source code and comparison plots, see `custom_rmsle.py <https://github.com/dmlc/xgboost/tree/master/demo/guide-python/custom_rmsle.py>`_.

+******************************
+Multi-class objective function
+******************************
+
+A similar demo for multi-class objective function is also available, see
+`demo/guide-python/custom_softmax.py <https://github.com/dmlc/xgboost/tree/master/demo/guide-python/custom_softmax.py>`_
+for details.

@@ -37,12 +37,11 @@ illustrates the basic usage:

  output = xgb.dask.train(client,
                          {'verbosity': 2,
-                          'nthread': 1,
                           'tree_method': 'hist'},
                          dtrain,
                          num_boost_round=4, evals=[(dtrain, 'train')])

-Here we first create a cluster in signle-node mode wtih ``distributed.LocalCluster``, then
+Here we first create a cluster in single-node mode with ``distributed.LocalCluster``, then
connect a ``client`` to this cluster, setting up environment for later computation.
Similar to non-distributed interface, we create a ``DMatrix`` object and pass it to
``train`` along with some other parameters. Except in dask interface, client is an extra

@@ -76,6 +75,32 @@ Another set of API is a Scikit-Learn wrapper, which mimics the stateful Scikit-L
interface with ``DaskXGBClassifier`` and ``DaskXGBRegressor``. See ``xgboost/demo/dask``
for more examples.

+*******
+Threads
+*******
+
+XGBoost has built-in support for parallel computation through threads by setting the
+``nthread`` parameter (``n_jobs`` for scikit-learn). If these parameters are set, they
+will override the configuration in Dask. For example:
+
+.. code-block:: python
+
+  with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
+
+There are 4 threads allocated for each dask worker. Then by default XGBoost will use 4
+threads in each process for both training and prediction. But if ``nthread`` parameter is
+set:
+
+.. code-block:: python
+
+  output = xgb.dask.train(client,
+                          {'verbosity': 1,
+                           'nthread': 8,
+                           'tree_method': 'hist'},
+                          dtrain,
+                          num_boost_round=4, evals=[(dtrain, 'train')])
+
+XGBoost will use 8 threads in each training process.

*****************************************************************************
Why is the initialization of ``DaskDMatrix`` so slow and throws weird errors
*****************************************************************************

@@ -106,8 +131,14 @@ Basic functionalities including training and generating predictions for regressi
classification are implemented. But there are still some other limitations we haven't
addressed yet.

-- Label encoding for Scikit-Learn classifier.
-- Ranking
+- Label encoding for Scikit-Learn classifier may not be supported, meaning that users need
+  to encode their training labels into discrete values first.
+- Ranking is not supported right now.
+- Empty workers are not well supported by the classifier. If training hangs for the
+  classifier with a warning about an empty DMatrix, please consider balancing your data
+  first. The regressor works fine with an empty DMatrix.
+- Callback functions are not tested.
- To use cross validation one needs to explicitly train different models instead of using
  a functional API like ``xgboost.cv``.
+- Only ``GridSearchCV`` from ``scikit-learn`` is supported for the dask interface, meaning
+  that we can distribute data among workers but have to train one model at a time. If you
+  want to scale up grid searching with model parallelism via ``dask-ml``, please consider
+  using the normal ``scikit-learn`` interface like `xgboost.XGBRegressor` for now (see the
+  sketch below).

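For instance, a single-machine grid search with the regular scikit-learn wrapper (a
sketch, with made-up parameter values) would look like:

.. code-block:: python

  from sklearn.model_selection import GridSearchCV
  from sklearn.datasets import make_regression
  import xgboost as xgb

  X, y = make_regression(n_samples=1000, n_features=10)
  search = GridSearchCV(
      xgb.XGBRegressor(tree_method='hist'),
      param_grid={'max_depth': [2, 4, 6], 'learning_rate': [0.05, 0.1]},
      cv=3)
  search.fit(X, y)
  print(search.best_params_)
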
@@ -1,6 +1,6 @@
-############################################
-Using XGBoost External Memory Version (beta)
-############################################
+#####################################
+Using XGBoost External Memory Version
+#####################################
There is no big difference between using external memory version and in-memory version.
The only difference is the filename format.

@@ -14,7 +14,13 @@ The ``filename`` is the normal path to libsvm format file you want to load in, a
``cacheprefix`` is a path to a cache file that XGBoost will use for caching preprocessed
data in binary form.

-.. note:: External memory is also available with GPU algorithms (i.e. when ``tree_method`` is set to ``gpu_hist``)
+To load from csv files, use the following syntax:
+
+.. code-block:: none
+
+  filename.csv?format=csv&label_column=0#cacheprefix
+
+where ``label_column`` should point to the csv column acting as the label.

To provide a simple example for illustration, extracting the code from
`demo/guide-python/external_memory.py <https://github.com/dmlc/xgboost/blob/master/demo/guide-python/external_memory.py>`_. If

@@ -25,22 +31,26 @@ you have a dataset stored in a file similar to ``agaricus.txt.train`` with libSV
  dtrain = DMatrix('../data/agaricus.txt.train#dtrain.cache')

XGBoost will first load ``agaricus.txt.train`` in, preprocess it, then write to a new file named
-``dtrain.cache`` as an on disk cache for storing preprocessed data in a internal binary format. For
+``dtrain.cache`` as an on disk cache for storing preprocessed data in an internal binary format. For
more notes about text input formats, see :doc:`/tutorials/input_format`.

.. code-block:: python

  dtrain = xgb.DMatrix('../data/agaricus.txt.train#dtrain.cache')

For CLI version, simply add the cache suffix, e.g. ``"../data/agaricus.txt.train#dtrain.cache"``.

-****************
-Performance Note
-****************
-* the parameter ``nthread`` should be set to number of **physical** cores
-
-  - Most modern CPUs use hyperthreading, which means a 4 core CPU may carry 8 threads
-  - Set ``nthread`` to be 4 for maximum performance in such case
+***********
+GPU Version
+***********
+External memory is fully supported in GPU algorithms (i.e. when ``tree_method`` is set to ``gpu_hist``).
+
+If you are still getting out-of-memory errors after enabling external memory, try subsampling the
+data to further reduce GPU memory usage:
+
+.. code-block:: python
+
+  param = {
+    ...
+    'subsample': 0.1,
+    'sampling_method': 'gradient_based',
+  }

*******************
Distributed Version
*******************

@@ -51,14 +61,12 @@ The external memory mode naturally works on distributed version, you can simply

  data = "hdfs://path-to-data/#dtrain.cache"

-XGBoost will cache the data to the local position. When you run on YARN, the current folder is temporal
+XGBoost will cache the data to the local position. When you run on YARN, the current folder is temporary
so that you can directly use ``dtrain.cache`` to cache to current folder.

-**********
-Usage Note
-**********
-* This is an experimental version
-* Currently only importing from libsvm format is supported
-
-  - Contribution of ingestion from other common external memory data source is welcomed
+***********
+Limitations
+***********
+* The ``hist`` tree method hasn't been tested thoroughly with external memory support (see
+  `this issue <https://github.com/dmlc/xgboost/issues/4093>`_).
+* OSX is not tested.

@@ -18,6 +18,7 @@ See `Awesome XGBoost <https://github.com/dmlc/xgboost/tree/master/demo>`_ for mo
  monotonic
  rf
  feature_interaction_constraint
+  aft_survival_analysis
  input_format
  param_tuning
  external_memory

@@ -1,36 +1,34 @@
###################################
-Distributed XGBoost with Kubernetes
+Distributed XGBoost on Kubernetes
###################################

-Kubeflow community provides `XGBoost Operator <https://github.com/kubeflow/xgboost-operator>`_ to support distributed XGBoost training and batch prediction in a Kubernetes cluster. It provides an easy and efficient XGBoost model training and batch prediction in distributed fashion.
+Distributed XGBoost training and batch prediction on `Kubernetes <https://kubernetes.io/>`_ are supported via `Kubeflow XGBoost Operator <https://github.com/kubeflow/xgboost-operator>`_.

-**********
-How to use
-**********
-In order to run a XGBoost job in a Kubernetes cluster, carry out the following steps:
+************
+Instructions
+************
+In order to run a XGBoost job in a Kubernetes cluster, perform the following steps:

-1. Install XGBoost Operator in Kubernetes.
+1. Install XGBoost Operator on the Kubernetes cluster.

-   a. XGBoost Operator is designed to manage XGBoost jobs, including job scheduling, monitoring, pods and services recovery etc. Follow the `installation guide <https://github.com/kubeflow/xgboost-operator#installing-xgboost-operator>`_ to install XGBoost Operator.
+   a. XGBoost Operator is designed to manage the scheduling and monitoring of XGBoost jobs. Follow `this installation guide <https://github.com/kubeflow/xgboost-operator#installing-xgboost-operator>`_ to install XGBoost Operator.

-2. Write application code to interface with the XGBoost operator.
+2. Write application code that will be executed by the XGBoost Operator.

-   a. You'll need to furnish a few scripts to inteface with the XGBoost operator. Refer to the `Iris classification example <https://github.com/kubeflow/xgboost-operator/tree/master/config/samples/xgboost-dist>`_.
-   b. Data reader/writer: you need to have your data source reader and writer based on the requirement. For example, if your data is stored in a Hive Table, you have to write your own code to read/write Hive table based on the ID of worker.
-   c. Model persistence: in this example, model is stored in the OSS storage. If you want to store your model into Amazon S3, Google NFS or other storage, you'll need to specify the model reader and writer based on the requirement of storage system.
+   a. To use XGBoost Operator, you'll have to write a couple of Python scripts that implement the distributed training logic for XGBoost. Please refer to the `Iris classification example <https://github.com/kubeflow/xgboost-operator/tree/master/config/samples/xgboost-dist>`_.
+   b. Data reader/writer: you need to implement the data reader and writer based on the specific requirements of your chosen data source. For example, if your dataset is stored in a Hive table, you have to write the code to read from or write to the Hive table based on the index of the worker.
+   c. Model persistence: in the `Iris classification example <https://github.com/kubeflow/xgboost-operator/tree/master/config/samples/xgboost-dist>`_, the model is stored in `Alibaba OSS <https://www.alibabacloud.com/product/oss>`_. If you want to store your model in other storages such as Amazon S3 or Google NFS, you'll need to implement the model persistence logic based on the requirements of the chosen storage system.

3. Configure the XGBoost job using a YAML file.

-   a. YAML file is used to configure the computation resource and environment for your XGBoost job to run, e.g. the number of workers and masters. The template `YAML template <https://github.com/kubeflow/xgboost-operator/blob/master/config/samples/xgboost-dist/xgboostjob_v1alpha1_iris_train.yaml>`_ is provided for reference.
+   a. YAML file is used to configure the computational resources and environment for your XGBoost job to run, e.g. the number of workers/masters and the number of CPU/GPUs. Please refer to this `YAML template <https://github.com/kubeflow/xgboost-operator/blob/master/config/samples/xgboost-dist/xgboostjob_v1alpha1_iris_train.yaml>`_ for an example.

-4. Submit XGBoost job to Kubernetes cluster.
+4. Submit XGBoost job to a Kubernetes cluster.

-   a. `Kubectl command <https://github.com/kubeflow/xgboost-operator#creating-a-xgboost-trainingprediction-job>`_ is used to submit a XGBoost job, and then you can monitor the job status.
+   a. Use `kubectl <https://kubernetes.io/docs/reference/kubectl/overview/>`_ to submit a distributed XGBoost job as illustrated `here <https://github.com/kubeflow/xgboost-operator#creating-a-xgboost-trainingprediction-job>`_.

-****************
-Work in progress
-****************
+*******
+Support
+*******

-- XGBoost Model serving
-- Distributed data reader/writer from/to HDFS, HBase, Hive etc.
-- Model persistence on Amazon S3, Google NFS etc.
+Please submit an issue on `XGBoost Operator repo <https://github.com/kubeflow/xgboost-operator>`_ for any feature requests or problems.

@@ -102,7 +102,7 @@ comments in the script for more details.
Saving and Loading the internal parameters configuration
********************************************************

-XGBoost's ``C API`` and ``Python API`` supports saving and loading the internal
+XGBoost's ``C API``, ``Python API`` and ``R API`` support saving and loading the internal
configuration directly as a JSON string. In Python package:

.. code-block:: python

@@ -111,6 +111,14 @@ configuration directly as a JSON string. In Python package:
  config = bst.save_config()
  print(config)

+or
+
+.. code-block:: R
+
+  config <- xgb.config(bst)
+  print(config)
+
Will print out something similar to (not actual output as it's too long for demonstration):

.. code-block:: json

@@ -195,7 +203,9 @@ You can load it back to the model generated by same version of XGBoost by:

  bst.load_config(config)

-This way users can study the internal representation more closely.
+This way users can study the internal representation more closely. Please note that some
+JSON generators make use of locale dependent floating point serialization methods, which
+is not supported by XGBoost.

************
Future Plans

@@ -106,7 +106,7 @@ using bst_uint = uint32_t;  // NOLINT
/*! \brief integer type. */
using bst_int = int32_t;  // NOLINT
/*! \brief unsigned long integers */
-using bst_ulong = uint64_t;
+using bst_ulong = uint64_t;  // NOLINT
/*! \brief float type, used for storing statistics */
using bst_float = float;  // NOLINT

@@ -135,15 +135,15 @@ class GradientPairInternal {
  /*! \brief second order gradient statistics */
  T hess_;

-  XGBOOST_DEVICE void SetGrad(float g) { grad_ = g; }
-  XGBOOST_DEVICE void SetHess(float h) { hess_ = h; }
+  XGBOOST_DEVICE void SetGrad(T g) { grad_ = g; }
+  XGBOOST_DEVICE void SetHess(T h) { hess_ = h; }

 public:
  using ValueT = T;

  XGBOOST_DEVICE GradientPairInternal() : grad_(0), hess_(0) {}

-  XGBOOST_DEVICE GradientPairInternal(float grad, float hess) {
+  XGBOOST_DEVICE GradientPairInternal(T grad, T hess) {
    SetGrad(grad);
    SetHess(hess);
  }

@@ -160,8 +160,8 @@ class GradientPairInternal {
    SetHess(g.GetHess());
  }

-  XGBOOST_DEVICE float GetGrad() const { return grad_; }
-  XGBOOST_DEVICE float GetHess() const { return hess_; }
+  XGBOOST_DEVICE T GetGrad() const { return grad_; }
+  XGBOOST_DEVICE T GetHess() const { return hess_; }

  XGBOOST_DEVICE GradientPairInternal<T> &operator+=(
      const GradientPairInternal<T> &rhs) {
@@ -193,6 +193,36 @@ class GradientPairInternal {
|
||||
return g;
|
||||
}
|
||||
|
||||
XGBOOST_DEVICE GradientPairInternal<T> &operator*=(float multiplier) {
|
||||
grad_ *= multiplier;
|
||||
hess_ *= multiplier;
|
||||
return *this;
|
||||
}
|
||||
|
||||
XGBOOST_DEVICE GradientPairInternal<T> operator*(float multiplier) const {
|
||||
GradientPairInternal<T> g;
|
||||
g.grad_ = grad_ * multiplier;
|
||||
g.hess_ = hess_ * multiplier;
|
||||
return g;
|
||||
}
|
||||
|
||||
XGBOOST_DEVICE GradientPairInternal<T> &operator/=(float divisor) {
|
||||
grad_ /= divisor;
|
||||
hess_ /= divisor;
|
||||
return *this;
|
||||
}
|
||||
|
||||
XGBOOST_DEVICE GradientPairInternal<T> operator/(float divisor) const {
|
||||
GradientPairInternal<T> g;
|
||||
g.grad_ = grad_ / divisor;
|
||||
g.hess_ = hess_ / divisor;
|
||||
return g;
|
||||
}
|
||||
|
||||
XGBOOST_DEVICE bool operator==(const GradientPairInternal<T> &rhs) const {
|
||||
return grad_ == rhs.grad_ && hess_ == rhs.hess_;
|
||||
}
|
||||
|
||||
XGBOOST_DEVICE explicit GradientPairInternal(int value) {
|
||||
*this = GradientPairInternal<T>(static_cast<float>(value),
|
||||
static_cast<float>(value));
|
||||
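As a quick illustration, a sketch of what the newly added scaling and comparison operators permit on the ``float``-backed ``GradientPair`` alias defined below (the values are illustrative):

.. code-block:: cpp

  #include <xgboost/base.h>

  xgboost::GradientPair g{1.0f, 2.0f};   // grad = 1.0, hess = 2.0
  g *= 0.5f;                             // grad = 0.5, hess = 1.0
  auto h = g / 2.0f;                     // grad = 0.25, hess = 0.5
  bool equal = (h == xgboost::GradientPair{0.25f, 0.5f});  // true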
@@ -204,24 +234,6 @@ class GradientPairInternal {
|
||||
return os;
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
inline XGBOOST_DEVICE float GradientPairInternal<int64_t>::GetGrad() const {
|
||||
return grad_ * 1e-4f;
|
||||
}
|
||||
template<>
|
||||
inline XGBOOST_DEVICE float GradientPairInternal<int64_t>::GetHess() const {
|
||||
return hess_ * 1e-4f;
|
||||
}
|
||||
template<>
|
||||
inline XGBOOST_DEVICE void GradientPairInternal<int64_t>::SetGrad(float g) {
|
||||
grad_ = static_cast<int64_t>(std::round(g * 1e4));
|
||||
}
|
||||
template<>
|
||||
inline XGBOOST_DEVICE void GradientPairInternal<int64_t>::SetHess(float h) {
|
||||
hess_ = static_cast<int64_t>(std::round(h * 1e4));
|
||||
}
|
||||
|
||||
} // namespace detail
|
||||
|
||||
/*! \brief gradient statistics pair usually needed in gradient boosting */
|
||||
@@ -230,11 +242,6 @@ using GradientPair = detail::GradientPairInternal<float>;
|
||||
/*! \brief High precision gradient statistics pair */
|
||||
using GradientPairPrecise = detail::GradientPairInternal<double>;
|
||||
|
||||
/*! \brief High precision gradient statistics pair with integer backed
|
||||
* storage. Operators are associative where floating point versions are not
|
||||
* associative. */
|
||||
using GradientPairInteger = detail::GradientPairInternal<int64_t>;
|
||||
|
||||
using Args = std::vector<std::pair<std::string, std::string> >;
|
||||
|
||||
/*! \brief small eps gap for minimum split decision. */
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*!
|
||||
* Copyright (c) 2015 by Contributors
|
||||
* Copyright (c) 2015~2020 by Contributors
|
||||
* \file c_api.h
|
||||
* \author Tianqi Chen
|
||||
* \brief C API of XGBoost, used for interfacing to other languages.
|
||||
@@ -20,7 +20,7 @@
|
||||
#if defined(_MSC_VER) || defined(_WIN32)
|
||||
#define XGB_DLL XGB_EXTERN_C __declspec(dllexport)
|
||||
#else
|
||||
#define XGB_DLL XGB_EXTERN_C
|
||||
#define XGB_DLL XGB_EXTERN_C __attribute__ ((visibility ("default")))
|
||||
#endif // defined(_MSC_VER) || defined(_WIN32)
|
||||
|
||||
// manually define unsigned long
|
||||
@@ -40,6 +40,8 @@ typedef void *DataHolderHandle; // NOLINT(*)
|
||||
typedef struct { // NOLINT(*)
|
||||
/*! \brief number of rows in the minibatch */
|
||||
size_t size;
|
||||
/*! \brief number of columns in the minibatch. */
|
||||
size_t columns;
|
||||
/*! \brief row pointer to the rows in the data */
|
||||
#ifdef __APPLE__
|
||||
/* Necessary as Java on MacOS defines jlong as long int
|
||||
@@ -416,7 +418,14 @@ XGB_DLL int XGBoosterEvalOneIter(BoosterHandle handle,
|
||||
* 4:output feature contributions to individual predictions
|
||||
* \param ntree_limit limit number of trees used for prediction, this is only valid for boosted trees
|
||||
* when the parameter is set to 0, we will use all the trees
|
||||
* \param training Whether the prediction value is used for training.
|
||||
* \param training Whether the prediction function is used as part of a training loop.
|
||||
* Prediction can be run in 2 scenarios:
|
||||
* 1. Given data matrix X, obtain prediction y_pred from the model.
|
||||
* 2. Obtain the prediction for computing gradients. For example, DART booster performs dropout
|
||||
* during training, and the prediction result will be different from the one obtained by the normal
|
||||
* inference step due to dropped trees.
|
||||
* Set training=false for the first scenario. Set training=true for the second scenario.
|
||||
* The second scenario applies when you are defining a custom objective function.
|
||||
* \param out_len used to store length of returning result
|
||||
* \param out_result used to set a pointer to array
|
||||
* \return 0 when success, -1 when failure happens
|
||||
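A hedged sketch of how the two scenarios map onto the ``training`` argument of ``XGBoosterPredict`` (handles are assumed to be valid; return codes unchecked):

.. code-block:: cpp

  #include <xgboost/c_api.h>

  BoosterHandle booster;  /* assumed trained */
  DMatrixHandle dmat;     /* assumed loaded */
  bst_ulong out_len = 0;
  const float *out_result = NULL;
  /* Scenario 1: plain inference on a data matrix. */
  XGBoosterPredict(booster, dmat, /*option_mask=*/0, /*ntree_limit=*/0,
                   /*training=*/0, &out_len, &out_result);
  /* Scenario 2: prediction for gradient computation inside a custom-objective
   * training loop; DART performs dropout here, so results may differ. */
  XGBoosterPredict(booster, dmat, 0, 0, /*training=*/1, &out_len, &out_result);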
@@ -531,6 +540,7 @@ XGB_DLL int XGBoosterSaveRabitCheckpoint(BoosterHandle handle);
|
||||
* notice.
|
||||
*
|
||||
* \param handle handle to Booster object.
|
||||
* \param out_len length of output string
|
||||
* \param out_str A valid pointer to array of characters. The characters array is
|
||||
* allocated and managed by XGBoost, while the pointer to that array needs to
|
||||
* be managed by the caller.
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
|
||||
#include <dmlc/base.h>
|
||||
#include <dmlc/data.h>
|
||||
#include <dmlc/serializer.h>
|
||||
#include <rabit/rabit.h>
|
||||
#include <xgboost/base.h>
|
||||
#include <xgboost/span.h>
|
||||
@@ -39,32 +40,42 @@ enum class DataType : uint8_t {
|
||||
class MetaInfo {
|
||||
public:
|
||||
/*! \brief number of data fields in MetaInfo */
|
||||
static constexpr uint64_t kNumField = 7;
|
||||
static constexpr uint64_t kNumField = 9;
|
||||
|
||||
/*! \brief number of rows in the data */
|
||||
uint64_t num_row_{0};
|
||||
uint64_t num_row_{0}; // NOLINT
|
||||
/*! \brief number of columns in the data */
|
||||
uint64_t num_col_{0};
|
||||
uint64_t num_col_{0}; // NOLINT
|
||||
/*! \brief number of nonzero entries in the data */
|
||||
uint64_t num_nonzero_{0};
|
||||
uint64_t num_nonzero_{0}; // NOLINT
|
||||
/*! \brief label of each instance */
|
||||
HostDeviceVector<bst_float> labels_;
|
||||
HostDeviceVector<bst_float> labels_; // NOLINT
|
||||
/*!
|
||||
* \brief the index of begin and end of a group
|
||||
* needed when the learning task is ranking.
|
||||
*/
|
||||
std::vector<bst_group_t> group_ptr_;
|
||||
std::vector<bst_group_t> group_ptr_; // NOLINT
|
||||
/*! \brief weights of each instance, optional */
|
||||
HostDeviceVector<bst_float> weights_;
|
||||
HostDeviceVector<bst_float> weights_; // NOLINT
|
||||
/*!
|
||||
* \brief initialized margins,
|
||||
* if specified, xgboost will start from this init margin
|
||||
* can be used to specify initial prediction to boost from.
|
||||
*/
|
||||
HostDeviceVector<bst_float> base_margin_;
|
||||
HostDeviceVector<bst_float> base_margin_; // NOLINT
|
||||
/*!
|
||||
* \brief lower bound of the label, to be used for survival analysis (censored regression)
|
||||
*/
|
||||
HostDeviceVector<bst_float> labels_lower_bound_; // NOLINT
|
||||
/*!
|
||||
* \brief upper bound of the label, to be used for survival analysis (censored regression)
|
||||
*/
|
||||
HostDeviceVector<bst_float> labels_upper_bound_; // NOLINT
|
||||
|
||||
/*! \brief default constructor */
|
||||
MetaInfo() = default;
|
||||
MetaInfo(MetaInfo&& that) = default;
|
||||
MetaInfo& operator=(MetaInfo&& that) = default;
|
||||
MetaInfo& operator=(MetaInfo const& that) {
|
||||
this->num_row_ = that.num_row_;
|
||||
this->num_col_ = that.num_col_;
|
||||
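A short sketch of how the two new bound fields could be filled for censored data (semantics as documented above; the values are illustrative):

.. code-block:: cpp

  #include <limits>
  #include <xgboost/data.h>

  xgboost::MetaInfo info;
  info.num_row_ = 2;
  // Row 0: the event is known to fall in the interval [2, 3].
  // Row 1: right-censored at t = 5, so the upper bound is unbounded.
  info.labels_lower_bound_.HostVector() = {2.0f, 5.0f};
  info.labels_upper_bound_.HostVector() =
      {3.0f, std::numeric_limits<float>::infinity()};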
@@ -77,10 +88,24 @@ class MetaInfo {
|
||||
|
||||
this->weights_.Resize(that.weights_.Size());
|
||||
this->weights_.Copy(that.weights_);
|
||||
|
||||
this->base_margin_.Resize(that.base_margin_.Size());
|
||||
this->base_margin_.Copy(that.base_margin_);
|
||||
|
||||
this->labels_lower_bound_.Resize(that.labels_lower_bound_.Size());
|
||||
this->labels_lower_bound_.Copy(that.labels_lower_bound_);
|
||||
|
||||
this->labels_upper_bound_.Resize(that.labels_upper_bound_.Size());
|
||||
this->labels_upper_bound_.Copy(that.labels_upper_bound_);
|
||||
return *this;
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief Validate all metainfo.
|
||||
*/
|
||||
void Validate(int32_t device) const;
|
||||
|
||||
MetaInfo Slice(common::Span<int32_t const> ridxs) const;
|
||||
/*!
|
||||
* \brief Get weight of each instances.
|
||||
* \param i Instance index.
|
||||
@@ -168,17 +193,15 @@ struct BatchParam {
|
||||
/*! \brief The GPU device to use. */
|
||||
int gpu_id;
|
||||
/*! \brief Maximum number of bins per feature for histograms. */
|
||||
int max_bin;
|
||||
/*! \brief Number of rows in a GPU batch, used for finding quantiles on GPU. */
|
||||
int gpu_batch_nrows;
|
||||
int max_bin{0};
|
||||
/*! \brief Page size for external memory mode. */
|
||||
size_t gpu_page_size;
|
||||
|
||||
BatchParam() = default;
|
||||
BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0)
|
||||
: gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {}
|
||||
inline bool operator!=(const BatchParam& other) const {
|
||||
return gpu_id != other.gpu_id ||
|
||||
max_bin != other.max_bin ||
|
||||
gpu_batch_nrows != other.gpu_batch_nrows ||
|
||||
gpu_page_size != other.gpu_page_size;
|
||||
return gpu_id != other.gpu_id || max_bin != other.max_bin ||
|
||||
gpu_page_size != other.gpu_page_size;
|
||||
}
|
||||
};
|
||||
|
||||
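A sketch of how the simplified ``BatchParam`` is consumed through the templated ``GetBatches`` interface declared further down (``dmat`` is assumed to be a valid ``DMatrix*``):

.. code-block:: cpp

  #include <xgboost/data.h>

  // Request GPU histogram (ellpack) pages with 256 bins on device 0.
  xgboost::BatchParam param{/*device=*/0, /*max_bin=*/256};
  for (auto const &page : dmat->GetBatches<xgboost::EllpackPage>(param)) {
    // ... consume each EllpackPage ...
  }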
@@ -354,7 +377,7 @@ class BatchIteratorImpl {
|
||||
template<typename T>
|
||||
class BatchIterator {
|
||||
public:
|
||||
using iterator_category = std::forward_iterator_tag;
|
||||
using iterator_category = std::forward_iterator_tag; // NOLINT
|
||||
explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
|
||||
|
||||
void operator++() {
|
||||
@@ -389,9 +412,9 @@ class BatchIterator {
|
||||
template<typename T>
|
||||
class BatchSet {
|
||||
public:
|
||||
explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(begin_iter) {}
|
||||
BatchIterator<T> begin() { return begin_iter_; }
|
||||
BatchIterator<T> end() { return BatchIterator<T>(nullptr); }
|
||||
explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {}
|
||||
BatchIterator<T> begin() { return begin_iter_; } // NOLINT
|
||||
BatchIterator<T> end() { return BatchIterator<T>(nullptr); } // NOLINT
|
||||
|
||||
private:
|
||||
BatchIterator<T> begin_iter_;
|
||||
@@ -438,21 +461,14 @@ class DMatrix {
|
||||
*/
|
||||
template<typename T>
|
||||
BatchSet<T> GetBatches(const BatchParam& param = {});
|
||||
template <typename T>
|
||||
bool PageExists() const;
|
||||
|
||||
// the following are column meta data, should be able to answer them fast.
|
||||
/*! \return Whether the data columns single column block. */
|
||||
virtual bool SingleColBlock() const = 0;
|
||||
/*! \brief get column density */
|
||||
virtual float GetColDensity(size_t cidx) = 0;
|
||||
/*! \brief virtual destructor */
|
||||
virtual ~DMatrix() = default;
|
||||
/*!
|
||||
* \brief Save DMatrix to local file.
|
||||
* The saved file only works for non-sharded dataset(single machine training).
|
||||
* This API is deprecated and dis-encouraged to use.
|
||||
* \param fname The file name to be saved.
|
||||
|
||||
*/
|
||||
virtual void SaveToLocalFile(const std::string& fname);
|
||||
|
||||
/*! \brief Whether the matrix is dense. */
|
||||
bool IsDense() const {
|
||||
@@ -475,16 +491,6 @@ class DMatrix {
|
||||
const std::string& file_format = "auto",
|
||||
size_t page_size = kPageSize);
|
||||
|
||||
/*!
|
||||
* \brief create a new DMatrix, by wrapping a row_iterator, and meta info.
|
||||
* \param source The source iterator of the data, the create function takes ownership of the source.
|
||||
* \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
|
||||
* This can be nullptr for common cases, and in-memory mode will be used.
|
||||
* \return a Created DMatrix.
|
||||
*/
|
||||
static DMatrix* Create(std::unique_ptr<DataSource<SparsePage>>&& source,
|
||||
const std::string& cache_prefix = "");
|
||||
|
||||
/**
|
||||
* \brief Creates a new DMatrix from an external data adapter.
|
||||
*
|
||||
@@ -502,7 +508,7 @@ class DMatrix {
|
||||
const std::string& cache_prefix = "",
|
||||
size_t page_size = kPageSize);
|
||||
|
||||
|
||||
virtual DMatrix* Slice(common::Span<int32_t const> ridxs) = 0;
|
||||
/*! \brief page size 32 MB */
|
||||
static const size_t kPageSize = 32UL << 20UL;
|
||||
|
||||
@@ -511,6 +517,9 @@ class DMatrix {
|
||||
virtual BatchSet<CSCPage> GetColumnBatches() = 0;
|
||||
virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
|
||||
virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
|
||||
|
||||
virtual bool EllpackExists() const = 0;
|
||||
virtual bool SparsePageExists() const = 0;
|
||||
};
|
||||
|
||||
template<>
|
||||
@@ -518,6 +527,16 @@ inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
|
||||
return GetRowBatches();
|
||||
}
|
||||
|
||||
template<>
|
||||
inline bool DMatrix::PageExists<EllpackPage>() const {
|
||||
return this->EllpackExists();
|
||||
}
|
||||
|
||||
template<>
|
||||
inline bool DMatrix::PageExists<SparsePage>() const {
|
||||
return this->SparsePageExists();
|
||||
}
|
||||
|
||||
template<>
|
||||
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
|
||||
return GetColumnBatches();
|
||||
@@ -536,5 +555,21 @@ inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
|
||||
|
||||
namespace dmlc {
|
||||
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
|
||||
}
|
||||
|
||||
namespace serializer {
|
||||
|
||||
template <>
|
||||
struct Handler<xgboost::Entry> {
|
||||
inline static void Write(Stream* strm, const xgboost::Entry& data) {
|
||||
strm->Write(data.index);
|
||||
strm->Write(data.fvalue);
|
||||
}
|
||||
|
||||
inline static bool Read(Stream* strm, xgboost::Entry* data) {
|
||||
return strm->Read(&data->index) && strm->Read(&data->fvalue);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace serializer
|
||||
} // namespace dmlc
|
||||
#endif // XGBOOST_DATA_H_
|
||||
|
||||
@@ -65,7 +65,7 @@ class FeatureMap {
|
||||
return names_[idx].c_str();
|
||||
}
|
||||
/*! \return type of specific feature */
|
||||
Type type(size_t idx) const {
|
||||
Type TypeOf(size_t idx) const {
|
||||
CHECK_LT(idx, names_.size()) << "FeatureMap feature index exceed bound";
|
||||
return types_[idx];
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*!
|
||||
* Copyright by Contributors
|
||||
* Copyright 2014-2020 by Contributors
|
||||
* \file gbm.h
|
||||
* \brief Interface of gradient booster,
|
||||
* that learns through gradient statistics.
|
||||
@@ -9,6 +9,7 @@
|
||||
#define XGBOOST_GBM_H_
|
||||
|
||||
#include <dmlc/registry.h>
|
||||
#include <dmlc/any.h>
|
||||
#include <xgboost/base.h>
|
||||
#include <xgboost/data.h>
|
||||
#include <xgboost/host_device_vector.h>
|
||||
@@ -18,6 +19,7 @@
|
||||
#include <utility>
|
||||
#include <string>
|
||||
#include <functional>
|
||||
#include <unordered_map>
|
||||
#include <memory>
|
||||
|
||||
namespace xgboost {
|
||||
@@ -28,6 +30,8 @@ class ObjFunction;
|
||||
|
||||
struct GenericParameter;
|
||||
struct LearnerModelParam;
|
||||
struct PredictionCacheEntry;
|
||||
class PredictionContainer;
|
||||
|
||||
/*!
|
||||
* \brief interface of gradient boosting model.
|
||||
@@ -38,7 +42,7 @@ class GradientBooster : public Model, public Configurable {
|
||||
|
||||
public:
|
||||
/*! \brief virtual destructor */
|
||||
virtual ~GradientBooster() = default;
|
||||
~GradientBooster() override = default;
|
||||
/*!
|
||||
* \brief Set the configuration of gradient boosting.
|
||||
* User must call configure once before InitModel and Training.
|
||||
@@ -68,24 +72,43 @@ class GradientBooster : public Model, public Configurable {
|
||||
* \brief perform update to the model(boosting)
|
||||
* \param p_fmat feature matrix that provide access to features
|
||||
* \param in_gpair address of the gradient pair statistics of the data
|
||||
* \param obj The objective function, optional, can be nullptr when use customized version
|
||||
* \param prediction The output prediction cache entry that needs to be updated.
|
||||
* the booster may change the content of gpair
|
||||
*/
|
||||
virtual void DoBoost(DMatrix* p_fmat,
|
||||
HostDeviceVector<GradientPair>* in_gpair,
|
||||
ObjFunction* obj = nullptr) = 0;
|
||||
virtual void DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
|
||||
PredictionCacheEntry *prediction) = 0;
|
||||
|
||||
/*!
|
||||
* \brief generate predictions for given feature matrix
|
||||
* \param dmat feature matrix
|
||||
* \param out_preds output vector to hold the predictions
|
||||
* \param ntree_limit limit the number of trees used in prediction, when it equals 0, this means
|
||||
* we do not limit number of trees, this parameter is only valid for gbtree, but not for gblinear
|
||||
* \param training Whether the prediction value is used for training. For dart booster
|
||||
* drop out is performed during training.
|
||||
* \param ntree_limit limit the number of trees used in prediction,
|
||||
* when it equals 0, this means we do not limit
|
||||
* number of trees, this parameter is only valid
|
||||
* for gbtree, but not for gblinear
|
||||
*/
|
||||
virtual void PredictBatch(DMatrix* dmat,
|
||||
HostDeviceVector<bst_float>* out_preds,
|
||||
PredictionCacheEntry* out_preds,
|
||||
bool training,
|
||||
unsigned ntree_limit = 0) = 0;
|
||||
|
||||
/*!
|
||||
* \brief Inplace prediction.
|
||||
*
|
||||
* \param x A type erased data adapter.
|
||||
* \param missing Missing value in the data.
|
||||
* \param [in,out] out_preds The output preds.
|
||||
* \param layer_begin (Optional) Beginning of boosted tree layer used for prediction.
|
||||
* \param layer_end (Optional) End of booster layer. 0 means do not limit trees.
|
||||
*/
|
||||
virtual void InplacePredict(dmlc::any const &x, float missing,
|
||||
PredictionCacheEntry *out_preds,
|
||||
uint32_t layer_begin = 0,
|
||||
uint32_t layer_end = 0) const {
|
||||
LOG(FATAL) << "Inplace predict is not supported by current booster.";
|
||||
}
|
||||
/*!
|
||||
* \brief online prediction function, predict score for one instance at a time
|
||||
* NOTE: use the batch prediction interface if possible, batch prediction is usually
|
||||
@@ -152,20 +175,12 @@ class GradientBooster : public Model, public Configurable {
|
||||
* \param name name of gradient booster
|
||||
* \param generic_param Pointer to runtime parameters
|
||||
* \param learner_model_param pointer to global model parameters
|
||||
* \param cache_mats The cache data matrix of the Booster.
|
||||
* \return The created booster.
|
||||
*/
|
||||
static GradientBooster* Create(
|
||||
const std::string& name,
|
||||
GenericParameter const* generic_param,
|
||||
LearnerModelParam const* learner_model_param,
|
||||
const std::vector<std::shared_ptr<DMatrix> >& cache_mats);
|
||||
|
||||
static void AssertGPUSupport() {
|
||||
#ifndef XGBOOST_USE_CUDA
|
||||
LOG(FATAL) << "XGBoost version not compiled with GPU support.";
|
||||
#endif // XGBOOST_USE_CUDA
|
||||
}
|
||||
LearnerModelParam const* learner_model_param);
|
||||
};
|
||||
|
||||
/*!
|
||||
@@ -174,8 +189,7 @@ class GradientBooster : public Model, public Configurable {
|
||||
struct GradientBoosterReg
|
||||
: public dmlc::FunctionRegEntryBase<
|
||||
GradientBoosterReg,
|
||||
std::function<GradientBooster* (const std::vector<std::shared_ptr<DMatrix> > &cached_mats,
|
||||
LearnerModelParam const* learner_model_param)> > {
|
||||
std::function<GradientBooster* (LearnerModelParam const* learner_model_param)> > {
|
||||
};
|
||||
|
||||
/*!
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*!
|
||||
* Copyright 2014-2019 by Contributors
|
||||
* \file learner.cc
|
||||
* \file generic_parameters.h
|
||||
*/
|
||||
#ifndef XGBOOST_GENERIC_PARAMETERS_H_
|
||||
#define XGBOOST_GENERIC_PARAMETERS_H_
|
||||
@@ -29,7 +29,6 @@ struct GenericParameter : public XGBoostParameter<GenericParameter> {
|
||||
size_t gpu_page_size;
|
||||
bool enable_experimental_json_serialization {false};
|
||||
bool validate_parameters {false};
|
||||
bool validate_features {true};
|
||||
|
||||
void CheckDeprecated() {
|
||||
if (this->n_gpus != 0) {
|
||||
@@ -75,9 +74,6 @@ struct GenericParameter : public XGBoostParameter<GenericParameter> {
|
||||
DMLC_DECLARE_FIELD(validate_parameters)
|
||||
.set_default(false)
|
||||
.describe("Enable checking whether parameters are used or not.");
|
||||
DMLC_DECLARE_FIELD(validate_features)
|
||||
.set_default(false)
|
||||
.describe("Enable validating input DMatrix.");
|
||||
DMLC_DECLARE_FIELD(n_gpus)
|
||||
.set_default(0)
|
||||
.set_range(0, 1)
|
||||
@@ -89,7 +85,7 @@ struct GenericParameter : public XGBoostParameter<GenericParameter> {
|
||||
|
||||
private:
|
||||
// number of devices to use (deprecated).
|
||||
int n_gpus {0};
|
||||
int n_gpus {0}; // NOLINT
|
||||
};
|
||||
} // namespace xgboost
|
||||
|
||||
|
||||
@@ -105,6 +105,9 @@ class HostDeviceVector {
|
||||
const T* DevicePointer() const { return ConstDevicePointer(); }
|
||||
|
||||
T* HostPointer() { return HostVector().data(); }
|
||||
common::Span<T> HostSpan() { return common::Span<T>{HostVector()}; }
|
||||
common::Span<T const> HostSpan() const { return common::Span<T const>{HostVector()}; }
|
||||
common::Span<T const> ConstHostSpan() const { return HostSpan(); }
|
||||
const T* ConstHostPointer() const { return ConstHostVector().data(); }
|
||||
const T* HostPointer() const { return ConstHostPointer(); }
|
||||
|
||||
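A sketch of the new span accessors (the constructor from ``std::vector`` is assumed, matching the rest of this header):

.. code-block:: cpp

  #include <vector>
  #include <xgboost/host_device_vector.h>

  xgboost::HostDeviceVector<float> vec{std::vector<float>{1.f, 2.f, 3.f}};
  // Mutable view over the host buffer; writes go straight through.
  xgboost::common::Span<float> s = vec.HostSpan();
  s[0] = 10.f;
  // Read-only view, also available on const vectors.
  xgboost::common::Span<float const> cs = vec.ConstHostSpan();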
@@ -127,7 +130,7 @@ class HostDeviceVector {
|
||||
|
||||
void Resize(size_t new_size, T v = T());
|
||||
|
||||
using value_type = T;
|
||||
using value_type = T; // NOLINT
|
||||
|
||||
private:
|
||||
HostDeviceVectorImpl<T>* impl_;
|
||||
|
||||
@@ -24,13 +24,13 @@ class Value {
|
||||
public:
|
||||
/*!\brief Simplified implementation of LLVM RTTI. */
|
||||
enum class ValueKind {
|
||||
String,
|
||||
Number,
|
||||
Integer,
|
||||
Object, // std::map
|
||||
Array, // std::vector
|
||||
Boolean,
|
||||
Null
|
||||
kString,
|
||||
kNumber,
|
||||
kInteger,
|
||||
kObject, // std::map
|
||||
kArray, // std::vector
|
||||
kBoolean,
|
||||
kNull
|
||||
};
|
||||
|
||||
explicit Value(ValueKind _kind) : kind_{_kind} {}
|
||||
@@ -54,7 +54,7 @@ class Value {
|
||||
|
||||
template <typename T>
|
||||
bool IsA(Value const* value) {
|
||||
return T::isClassOf(value);
|
||||
return T::IsClassOf(value);
|
||||
}
|
||||
|
||||
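A sketch of the simplified-RTTI helpers in use, together with the ``Cast`` declared just below (``Json`` is assumed constructible from the value types, as used elsewhere in the codebase):

.. code-block:: cpp

  #include <xgboost/json.h>

  xgboost::Json j{xgboost::JsonObject{}};
  xgboost::Value *v = &j.GetValue();
  if (xgboost::IsA<xgboost::JsonObject>(v)) {
    auto *obj = xgboost::Cast<xgboost::JsonObject>(v);
    obj->GetObject()["name"] = xgboost::Json{xgboost::JsonString{"demo"}};
  }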
template <typename T, typename U>
|
||||
@@ -70,26 +70,26 @@ T* Cast(U* value) {
|
||||
class JsonString : public Value {
|
||||
std::string str_;
|
||||
public:
|
||||
JsonString() : Value(ValueKind::String) {}
|
||||
JsonString() : Value(ValueKind::kString) {}
|
||||
JsonString(std::string const& str) : // NOLINT
|
||||
Value(ValueKind::String), str_{str} {}
|
||||
Value(ValueKind::kString), str_{str} {}
|
||||
JsonString(std::string&& str) : // NOLINT
|
||||
Value(ValueKind::String), str_{std::move(str)} {}
|
||||
Value(ValueKind::kString), str_{std::move(str)} {}
|
||||
|
||||
void Save(JsonWriter* writer) override;
|
||||
|
||||
Json& operator[](std::string const & key) override;
|
||||
Json& operator[](int ind) override;
|
||||
|
||||
std::string const& getString() && { return str_; }
|
||||
std::string const& getString() const & { return str_; }
|
||||
std::string& getString() & { return str_; }
|
||||
std::string const& GetString() && { return str_; }
|
||||
std::string const& GetString() const & { return str_; }
|
||||
std::string& GetString() & { return str_; }
|
||||
|
||||
bool operator==(Value const& rhs) const override;
|
||||
Value& operator=(Value const& rhs) override;
|
||||
|
||||
static bool isClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::String;
|
||||
static bool IsClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::kString;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -97,11 +97,11 @@ class JsonArray : public Value {
|
||||
std::vector<Json> vec_;
|
||||
|
||||
public:
|
||||
JsonArray() : Value(ValueKind::Array) {}
|
||||
JsonArray() : Value(ValueKind::kArray) {}
|
||||
JsonArray(std::vector<Json>&& arr) : // NOLINT
|
||||
Value(ValueKind::Array), vec_{std::move(arr)} {}
|
||||
Value(ValueKind::kArray), vec_{std::move(arr)} {}
|
||||
JsonArray(std::vector<Json> const& arr) : // NOLINT
|
||||
Value(ValueKind::Array), vec_{arr} {}
|
||||
Value(ValueKind::kArray), vec_{arr} {}
|
||||
JsonArray(JsonArray const& that) = delete;
|
||||
JsonArray(JsonArray && that);
|
||||
|
||||
@@ -110,15 +110,15 @@ class JsonArray : public Value {
|
||||
Json& operator[](std::string const & key) override;
|
||||
Json& operator[](int ind) override;
|
||||
|
||||
std::vector<Json> const& getArray() && { return vec_; }
|
||||
std::vector<Json> const& getArray() const & { return vec_; }
|
||||
std::vector<Json>& getArray() & { return vec_; }
|
||||
std::vector<Json> const& GetArray() && { return vec_; }
|
||||
std::vector<Json> const& GetArray() const & { return vec_; }
|
||||
std::vector<Json>& GetArray() & { return vec_; }
|
||||
|
||||
bool operator==(Value const& rhs) const override;
|
||||
Value& operator=(Value const& rhs) override;
|
||||
|
||||
static bool isClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::Array;
|
||||
static bool IsClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::kArray;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -126,7 +126,7 @@ class JsonObject : public Value {
|
||||
std::map<std::string, Json> object_;
|
||||
|
||||
public:
|
||||
JsonObject() : Value(ValueKind::Object) {}
|
||||
JsonObject() : Value(ValueKind::kObject) {}
|
||||
JsonObject(std::map<std::string, Json>&& object); // NOLINT
|
||||
JsonObject(JsonObject const& that) = delete;
|
||||
JsonObject(JsonObject && that);
|
||||
@@ -136,17 +136,17 @@ class JsonObject : public Value {
|
||||
Json& operator[](std::string const & key) override;
|
||||
Json& operator[](int ind) override;
|
||||
|
||||
std::map<std::string, Json> const& getObject() && { return object_; }
|
||||
std::map<std::string, Json> const& getObject() const & { return object_; }
|
||||
std::map<std::string, Json> & getObject() & { return object_; }
|
||||
std::map<std::string, Json> const& GetObject() && { return object_; }
|
||||
std::map<std::string, Json> const& GetObject() const & { return object_; }
|
||||
std::map<std::string, Json> & GetObject() & { return object_; }
|
||||
|
||||
bool operator==(Value const& rhs) const override;
|
||||
Value& operator=(Value const& rhs) override;
|
||||
|
||||
static bool isClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::Object;
|
||||
static bool IsClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::kObject;
|
||||
}
|
||||
virtual ~JsonObject() = default;
|
||||
~JsonObject() override = default;
|
||||
};
|
||||
|
||||
class JsonNumber : public Value {
|
||||
@@ -154,18 +154,18 @@ class JsonNumber : public Value {
|
||||
using Float = float;
|
||||
|
||||
private:
|
||||
Float number_;
|
||||
Float number_ { 0 };
|
||||
|
||||
public:
|
||||
JsonNumber() : Value(ValueKind::Number) {}
|
||||
JsonNumber() : Value(ValueKind::kNumber) {}
|
||||
template <typename FloatT,
|
||||
typename std::enable_if<std::is_same<FloatT, Float>::value>::type* = nullptr>
|
||||
JsonNumber(FloatT value) : Value(ValueKind::Number) { // NOLINT
|
||||
JsonNumber(FloatT value) : Value(ValueKind::kNumber) { // NOLINT
|
||||
number_ = value;
|
||||
}
|
||||
template <typename FloatT,
|
||||
typename std::enable_if<std::is_same<FloatT, double>::value>::type* = nullptr>
|
||||
JsonNumber(FloatT value) : Value{ValueKind::Number}, // NOLINT
|
||||
JsonNumber(FloatT value) : Value{ValueKind::kNumber}, // NOLINT
|
||||
number_{static_cast<Float>(value)} {}
|
||||
|
||||
void Save(JsonWriter* writer) override;
|
||||
@@ -173,16 +173,16 @@ class JsonNumber : public Value {
|
||||
Json& operator[](std::string const & key) override;
|
||||
Json& operator[](int ind) override;
|
||||
|
||||
Float const& getNumber() && { return number_; }
|
||||
Float const& getNumber() const & { return number_; }
|
||||
Float& getNumber() & { return number_; }
|
||||
Float const& GetNumber() && { return number_; }
|
||||
Float const& GetNumber() const & { return number_; }
|
||||
Float& GetNumber() & { return number_; }
|
||||
|
||||
|
||||
bool operator==(Value const& rhs) const override;
|
||||
Value& operator=(Value const& rhs) override;
|
||||
|
||||
static bool isClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::Number;
|
||||
static bool IsClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::kNumber;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -191,27 +191,27 @@ class JsonInteger : public Value {
|
||||
using Int = int64_t;
|
||||
|
||||
private:
|
||||
Int integer_;
|
||||
Int integer_ {0};
|
||||
|
||||
public:
|
||||
JsonInteger() : Value(ValueKind::Integer), integer_{0} {} // NOLINT
|
||||
JsonInteger() : Value(ValueKind::kInteger) {} // NOLINT
|
||||
template <typename IntT,
|
||||
typename std::enable_if<std::is_same<IntT, Int>::value>::type* = nullptr>
|
||||
JsonInteger(IntT value) : Value(ValueKind::Integer), integer_{value} {} // NOLINT
|
||||
JsonInteger(IntT value) : Value(ValueKind::kInteger), integer_{value} {} // NOLINT
|
||||
template <typename IntT,
|
||||
typename std::enable_if<std::is_same<IntT, size_t>::value>::type* = nullptr>
|
||||
JsonInteger(IntT value) : Value(ValueKind::Integer), // NOLINT
|
||||
JsonInteger(IntT value) : Value(ValueKind::kInteger), // NOLINT
|
||||
integer_{static_cast<Int>(value)} {}
|
||||
template <typename IntT,
|
||||
typename std::enable_if<std::is_same<IntT, int32_t>::value>::type* = nullptr>
|
||||
JsonInteger(IntT value) : Value(ValueKind::Integer), // NOLINT
|
||||
JsonInteger(IntT value) : Value(ValueKind::kInteger), // NOLINT
|
||||
integer_{static_cast<Int>(value)} {}
|
||||
template <typename IntT,
|
||||
typename std::enable_if<
|
||||
std::is_same<IntT, uint32_t>::value &&
|
||||
!std::is_same<std::size_t, uint32_t>::value>::type * = nullptr>
|
||||
JsonInteger(IntT value) // NOLINT
|
||||
: Value(ValueKind::Integer),
|
||||
: Value(ValueKind::kInteger),
|
||||
integer_{static_cast<Int>(value)} {}
|
||||
|
||||
Json& operator[](std::string const & key) override;
|
||||
@@ -220,20 +220,20 @@ class JsonInteger : public Value {
|
||||
bool operator==(Value const& rhs) const override;
|
||||
Value& operator=(Value const& rhs) override;
|
||||
|
||||
Int const& getInteger() && { return integer_; }
|
||||
Int const& getInteger() const & { return integer_; }
|
||||
Int& getInteger() & { return integer_; }
|
||||
Int const& GetInteger() && { return integer_; }
|
||||
Int const& GetInteger() const & { return integer_; }
|
||||
Int& GetInteger() & { return integer_; }
|
||||
void Save(JsonWriter* writer) override;
|
||||
|
||||
static bool isClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::Integer;
|
||||
static bool IsClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::kInteger;
|
||||
}
|
||||
};
|
||||
|
||||
class JsonNull : public Value {
|
||||
public:
|
||||
JsonNull() : Value(ValueKind::Null) {}
|
||||
JsonNull(std::nullptr_t) : Value(ValueKind::Null) {} // NOLINT
|
||||
JsonNull() : Value(ValueKind::kNull) {}
|
||||
JsonNull(std::nullptr_t) : Value(ValueKind::kNull) {} // NOLINT
|
||||
|
||||
void Save(JsonWriter* writer) override;
|
||||
|
||||
@@ -243,8 +243,8 @@ class JsonNull : public Value {
|
||||
bool operator==(Value const& rhs) const override;
|
||||
Value& operator=(Value const& rhs) override;
|
||||
|
||||
static bool isClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::Null;
|
||||
static bool IsClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::kNull;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -253,33 +253,34 @@ class JsonBoolean : public Value {
|
||||
bool boolean_;
|
||||
|
||||
public:
|
||||
JsonBoolean() : Value(ValueKind::Boolean) {} // NOLINT
|
||||
JsonBoolean() : Value(ValueKind::kBoolean) {} // NOLINT
|
||||
// Ambiguous with JsonNumber.
|
||||
template <typename Bool,
|
||||
typename std::enable_if<
|
||||
std::is_same<Bool, bool>::value ||
|
||||
std::is_same<Bool, bool const>::value>::type* = nullptr>
|
||||
JsonBoolean(Bool value) : // NOLINT
|
||||
Value(ValueKind::Boolean), boolean_{value} {}
|
||||
Value(ValueKind::kBoolean), boolean_{value} {}
|
||||
|
||||
void Save(JsonWriter* writer) override;
|
||||
|
||||
Json& operator[](std::string const & key) override;
|
||||
Json& operator[](int ind) override;
|
||||
|
||||
bool const& getBoolean() && { return boolean_; }
|
||||
bool const& getBoolean() const & { return boolean_; }
|
||||
bool& getBoolean() & { return boolean_; }
|
||||
bool const& GetBoolean() && { return boolean_; }
|
||||
bool const& GetBoolean() const & { return boolean_; }
|
||||
bool& GetBoolean() & { return boolean_; }
|
||||
|
||||
bool operator==(Value const& rhs) const override;
|
||||
Value& operator=(Value const& rhs) override;
|
||||
|
||||
static bool isClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::Boolean;
|
||||
static bool IsClassOf(Value const* value) {
|
||||
return value->Type() == ValueKind::kBoolean;
|
||||
}
|
||||
};
|
||||
|
||||
struct StringView {
|
||||
private:
|
||||
using CharT = char; // unsigned char
|
||||
CharT const* str_;
|
||||
size_t size_;
|
||||
@@ -392,7 +393,7 @@ class Json {
|
||||
}
|
||||
|
||||
// copy
|
||||
Json(Json const& other) : ptr_{other.ptr_} {}
|
||||
Json(Json const& other) = default;
|
||||
Json& operator=(Json const& other);
|
||||
// move
|
||||
Json(Json&& other) : ptr_{std::move(other.ptr_)} {}
|
||||
@@ -406,7 +407,7 @@ class Json {
|
||||
/*! \brief Index Json object with int, used for Json Array. */
|
||||
Json& operator[](int ind) const { return (*ptr_)[ind]; }
|
||||
|
||||
/*! \Brief Return the reference to stored Json value. */
|
||||
/*! \brief Return the reference to stored Json value. */
|
||||
Value const& GetValue() const & { return *ptr_; }
|
||||
Value const& GetValue() && { return *ptr_; }
|
||||
Value& GetValue() & { return *ptr_; }
|
||||
@@ -439,13 +440,13 @@ template <typename T,
|
||||
typename std::enable_if<
|
||||
std::is_same<T, JsonNumber>::value>::type* = nullptr>
|
||||
JsonNumber::Float& GetImpl(T& val) { // NOLINT
|
||||
return val.getNumber();
|
||||
return val.GetNumber();
|
||||
}
|
||||
template <typename T,
|
||||
typename std::enable_if<
|
||||
std::is_same<T, JsonNumber const>::value>::type* = nullptr>
|
||||
JsonNumber::Float const& GetImpl(T& val) { // NOLINT
|
||||
return val.getNumber();
|
||||
return val.GetNumber();
|
||||
}
|
||||
|
||||
// Integer
|
||||
@@ -453,13 +454,13 @@ template <typename T,
|
||||
typename std::enable_if<
|
||||
std::is_same<T, JsonInteger>::value>::type* = nullptr>
|
||||
JsonInteger::Int& GetImpl(T& val) { // NOLINT
|
||||
return val.getInteger();
|
||||
return val.GetInteger();
|
||||
}
|
||||
template <typename T,
|
||||
typename std::enable_if<
|
||||
std::is_same<T, JsonInteger const>::value>::type* = nullptr>
|
||||
JsonInteger::Int const& GetImpl(T& val) { // NOLINT
|
||||
return val.getInteger();
|
||||
return val.GetInteger();
|
||||
}
|
||||
|
||||
// String
|
||||
@@ -467,13 +468,13 @@ template <typename T,
|
||||
typename std::enable_if<
|
||||
std::is_same<T, JsonString>::value>::type* = nullptr>
|
||||
std::string& GetImpl(T& val) { // NOLINT
|
||||
return val.getString();
|
||||
return val.GetString();
|
||||
}
|
||||
template <typename T,
|
||||
typename std::enable_if<
|
||||
std::is_same<T, JsonString const>::value>::type* = nullptr>
|
||||
std::string const& GetImpl(T& val) { // NOLINT
|
||||
return val.getString();
|
||||
return val.GetString();
|
||||
}
|
||||
|
||||
// Boolean
|
||||
@@ -481,13 +482,13 @@ template <typename T,
|
||||
typename std::enable_if<
|
||||
std::is_same<T, JsonBoolean>::value>::type* = nullptr>
|
||||
bool& GetImpl(T& val) { // NOLINT
|
||||
return val.getBoolean();
|
||||
return val.GetBoolean();
|
||||
}
|
||||
template <typename T,
|
||||
typename std::enable_if<
|
||||
std::is_same<T, JsonBoolean const>::value>::type* = nullptr>
|
||||
bool const& GetImpl(T& val) { // NOLINT
|
||||
return val.getBoolean();
|
||||
return val.GetBoolean();
|
||||
}
|
||||
|
||||
// Array
|
||||
@@ -495,13 +496,13 @@ template <typename T,
|
||||
typename std::enable_if<
|
||||
std::is_same<T, JsonArray>::value>::type* = nullptr>
|
||||
std::vector<Json>& GetImpl(T& val) { // NOLINT
|
||||
return val.getArray();
|
||||
return val.GetArray();
|
||||
}
|
||||
template <typename T,
|
||||
typename std::enable_if<
|
||||
std::is_same<T, JsonArray const>::value>::type* = nullptr>
|
||||
std::vector<Json> const& GetImpl(T& val) { // NOLINT
|
||||
return val.getArray();
|
||||
return val.GetArray();
|
||||
}
|
||||
|
||||
// Object
|
||||
@@ -509,13 +510,13 @@ template <typename T,
|
||||
typename std::enable_if<
|
||||
std::is_same<T, JsonObject>::value>::type* = nullptr>
|
||||
std::map<std::string, Json>& GetImpl(T& val) { // NOLINT
|
||||
return val.getObject();
|
||||
return val.GetObject();
|
||||
}
|
||||
template <typename T,
|
||||
typename std::enable_if<
|
||||
std::is_same<T, JsonObject const>::value>::type* = nullptr>
|
||||
std::map<std::string, Json> const& GetImpl(T& val) { // NOLINT
|
||||
return val.getObject();
|
||||
return val.GetObject();
|
||||
}
|
||||
|
||||
} // namespace detail
|
||||
@@ -545,7 +546,7 @@ using Null = JsonNull;
|
||||
// Utils tailored for XGBoost.
|
||||
|
||||
template <typename Parameter>
|
||||
Object toJson(Parameter const& param) {
|
||||
Object ToJson(Parameter const& param) {
|
||||
Object obj;
|
||||
for (auto const& kv : param.__DICT__()) {
|
||||
obj[kv.first] = kv.second;
|
||||
@@ -554,7 +555,7 @@ Object toJson(Parameter const& param) {
|
||||
}
|
||||
|
||||
template <typename Parameter>
|
||||
void fromJson(Json const& obj, Parameter* param) {
|
||||
void FromJson(Json const& obj, Parameter* param) {
|
||||
auto const& j_param = get<Object const>(obj);
|
||||
std::map<std::string, std::string> m;
|
||||
for (auto const& kv : j_param) {
|
||||
|
||||
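A sketch of the renamed helpers round-tripping a DMLC parameter (``MyParam`` is a hypothetical ``XGBoostParameter`` subclass; constructing a ``Json`` from the returned ``Object`` follows the pattern used elsewhere in the codebase):

.. code-block:: cpp

  MyParam param;  // hypothetical parameter struct declaring a max_bin field
  param.UpdateAllowUnknown(xgboost::Args{{"max_bin", "256"}});

  // Serialize every declared field into a JSON object ...
  xgboost::Json config{xgboost::ToJson(param)};
  // ... and initialize a fresh parameter object from it.
  MyParam restored;
  xgboost::FromJson(config, &restored);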
@@ -38,10 +38,11 @@ class JsonReader {
|
||||
std::numeric_limits<double>::max_digits10 + 1;
|
||||
|
||||
struct SourceLocation {
|
||||
size_t pos_; // current position in raw_str_
|
||||
private:
|
||||
size_t pos_ { 0 }; // current position in raw_str_
|
||||
|
||||
public:
|
||||
SourceLocation() : pos_(0) {}
|
||||
SourceLocation() = default;
|
||||
size_t Pos() const { return pos_; }
|
||||
|
||||
SourceLocation& Forward() {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*!
|
||||
* Copyright 2015-2019 by Contributors
|
||||
* Copyright 2015-2020 by Contributors
|
||||
* \file learner.h
|
||||
* \brief Learner interface that integrates objective, gbm and evaluation together.
|
||||
* This is the user facing XGBoost training module.
|
||||
@@ -8,9 +8,11 @@
|
||||
#ifndef XGBOOST_LEARNER_H_
|
||||
#define XGBOOST_LEARNER_H_
|
||||
|
||||
#include <dmlc/any.h>
|
||||
#include <rabit/rabit.h>
|
||||
#include <xgboost/base.h>
|
||||
#include <xgboost/feature_map.h>
|
||||
#include <xgboost/predictor.h>
|
||||
#include <xgboost/generic_parameters.h>
|
||||
#include <xgboost/host_device_vector.h>
|
||||
#include <xgboost/model.h>
|
||||
@@ -29,6 +31,22 @@ class ObjFunction;
|
||||
class DMatrix;
|
||||
class Json;
|
||||
|
||||
/*! \brief entry to easily hold returning information */
|
||||
struct XGBAPIThreadLocalEntry {
|
||||
/*! \brief result holder for returning string */
|
||||
std::string ret_str;
|
||||
/*! \brief result holder for returning strings */
|
||||
std::vector<std::string> ret_vec_str;
|
||||
/*! \brief result holder for returning string pointers */
|
||||
std::vector<const char *> ret_vec_charp;
|
||||
/*! \brief returning float vector. */
|
||||
std::vector<bst_float> ret_vec_float;
|
||||
/*! \brief temp variable of gradient pairs. */
|
||||
std::vector<GradientPair> tmp_gpair;
|
||||
PredictionCacheEntry prediction_entry;
|
||||
};
|
||||
|
||||
|
||||
/*!
|
||||
* \brief Learner class that does training and prediction.
|
||||
* This is the user facing module of xgboost training.
|
||||
@@ -59,7 +77,7 @@ class Learner : public Model, public Configurable, public rabit::Serializable {
|
||||
* \param iter current iteration number
|
||||
* \param train reference to the data matrix.
|
||||
*/
|
||||
virtual void UpdateOneIter(int iter, DMatrix* train) = 0;
|
||||
virtual void UpdateOneIter(int iter, std::shared_ptr<DMatrix> train) = 0;
|
||||
/*!
|
||||
* \brief Do customized gradient boosting with in_gpair.
|
||||
* in_gpair can be mutated after this call.
|
||||
@@ -68,7 +86,7 @@ class Learner : public Model, public Configurable, public rabit::Serializable {
|
||||
* \param in_gpair The input gradient statistics.
|
||||
*/
|
||||
virtual void BoostOneIter(int iter,
|
||||
DMatrix* train,
|
||||
std::shared_ptr<DMatrix> train,
|
||||
HostDeviceVector<GradientPair>* in_gpair) = 0;
|
||||
/*!
|
||||
* \brief evaluate the model for specific iteration using the configured metrics.
|
||||
@@ -78,7 +96,7 @@ class Learner : public Model, public Configurable, public rabit::Serializable {
|
||||
* \return a string corresponding to the evaluation result
|
||||
*/
|
||||
virtual std::string EvalOneIter(int iter,
|
||||
const std::vector<DMatrix*>& data_sets,
|
||||
const std::vector<std::shared_ptr<DMatrix>>& data_sets,
|
||||
const std::vector<std::string>& data_names) = 0;
|
||||
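A hedged sketch of a minimal training loop against the updated ``shared_ptr`` interface (``DMatrix::Load`` and ``Learner::Create`` as declared in these headers; the file path is illustrative):

.. code-block:: cpp

  #include <memory>
  #include <xgboost/data.h>
  #include <xgboost/learner.h>
  #include <xgboost/logging.h>

  std::shared_ptr<xgboost::DMatrix> train{
      xgboost::DMatrix::Load("train.libsvm", /*silent=*/true, /*load_row_split=*/false)};
  std::unique_ptr<xgboost::Learner> learner{xgboost::Learner::Create({train})};
  learner->Configure();
  for (int iter = 0; iter < 10; ++iter) {
    // The learner's prediction cache is tied to the shared_ptr's lifetime.
    learner->UpdateOneIter(iter, train);
    LOG(CONSOLE) << learner->EvalOneIter(iter, {train}, {"train"});
  }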
/*!
|
||||
* \brief get prediction given the model.
|
||||
@@ -87,12 +105,13 @@ class Learner : public Model, public Configurable, public rabit::Serializable {
|
||||
* \param out_preds output vector that stores the prediction
|
||||
* \param ntree_limit limit number of trees used for boosted tree
|
||||
* predictor, when it equals 0, this means we are using all the trees
|
||||
* \param training Whether the prediction result is used for training
|
||||
* \param pred_leaf whether to only predict the leaf index of each tree in a boosted tree predictor
|
||||
* \param pred_contribs whether to only predict the feature contributions
|
||||
* \param approx_contribs whether to approximate the feature contributions for speed
|
||||
* \param pred_interactions whether to compute the feature pair contributions
|
||||
*/
|
||||
virtual void Predict(DMatrix* data,
|
||||
virtual void Predict(std::shared_ptr<DMatrix> data,
|
||||
bool output_margin,
|
||||
HostDeviceVector<bst_float> *out_preds,
|
||||
unsigned ntree_limit = 0,
|
||||
@@ -102,6 +121,21 @@ class Learner : public Model, public Configurable, public rabit::Serializable {
|
||||
bool approx_contribs = false,
|
||||
bool pred_interactions = false) = 0;
|
||||
|
||||
/*!
|
||||
* \brief Inplace prediction.
|
||||
*
|
||||
* \param x A type erased data adapter.
|
||||
* \param type Prediction type.
|
||||
* \param missing Missing value in the data.
|
||||
* \param [in,out] out_preds Pointer to output prediction vector.
|
||||
* \param layer_begin (Optional) Beginning of boosted tree layer used for prediction.
|
||||
* \param layer_end (Optional) End of booster layer. 0 means do not limit trees.
|
||||
*/
|
||||
virtual void InplacePredict(dmlc::any const& x, std::string const& type,
|
||||
float missing,
|
||||
HostDeviceVector<bst_float> **out_preds,
|
||||
uint32_t layer_begin = 0, uint32_t layer_end = 0) = 0;
|
||||
|
||||
void LoadModel(Json const& in) override = 0;
|
||||
void SaveModel(Json* out) const override = 0;
|
||||
|
||||
@@ -165,7 +199,9 @@ class Learner : public Model, public Configurable, public rabit::Serializable {
|
||||
*/
|
||||
virtual std::vector<std::string> DumpModel(const FeatureMap& fmap,
|
||||
bool with_stats,
|
||||
std::string format) const = 0;
|
||||
std::string format) = 0;
|
||||
|
||||
virtual XGBAPIThreadLocalEntry& GetThreadLocal() const = 0;
|
||||
/*!
|
||||
* \brief Create a new instance of learner.
|
||||
* \param cache_data The matrix to cache the prediction.
|
||||
@@ -198,13 +234,13 @@ struct LearnerModelParamLegacy;
|
||||
*/
|
||||
struct LearnerModelParam {
|
||||
/* \brief global bias */
|
||||
bst_float base_score;
|
||||
bst_float base_score { 0.5f };
|
||||
/* \brief number of features */
|
||||
uint32_t num_feature;
|
||||
uint32_t num_feature { 0 };
|
||||
/* \brief number of classes, if it is multi-class classification */
|
||||
uint32_t num_output_group;
|
||||
uint32_t num_output_group { 0 };
|
||||
|
||||
LearnerModelParam() : base_score {0.5}, num_feature{0}, num_output_group{0} {}
|
||||
LearnerModelParam() = default;
|
||||
// As the old `LearnerModelParamLegacy` is still used by binary IO, we keep
|
||||
// this one as an immutable copy.
|
||||
LearnerModelParam(LearnerModelParamLegacy const& user_param, float base_margin);
|
||||
|
||||
@@ -33,7 +33,7 @@ class LinearUpdater : public Configurable {
|
||||
|
||||
public:
|
||||
/*! \brief virtual destructor */
|
||||
virtual ~LinearUpdater() = default;
|
||||
~LinearUpdater() override = default;
|
||||
/*!
|
||||
* \brief Initialize the updater with given arguments.
|
||||
* \param args arguments to the objective function.
|
||||
|
||||
@@ -37,13 +37,9 @@ class BaseLogger {
|
||||
|
||||
// Both silent and debug_verbose are parsed to provide backward compatibility.
|
||||
struct ConsoleLoggerParam : public XGBoostParameter<ConsoleLoggerParam> {
|
||||
bool silent; // deprecated.
|
||||
int verbosity;
|
||||
|
||||
DMLC_DECLARE_PARAMETER(ConsoleLoggerParam) {
|
||||
DMLC_DECLARE_FIELD(silent)
|
||||
.set_default(false)
|
||||
.describe("Do not print information during training.");
|
||||
DMLC_DECLARE_FIELD(verbosity)
|
||||
.set_range(0, 3)
|
||||
.set_default(1) // shows only warning
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
#define XGBOOST_METRIC_H_
|
||||
|
||||
#include <dmlc/registry.h>
|
||||
#include <xgboost/model.h>
|
||||
#include <xgboost/generic_parameters.h>
|
||||
#include <xgboost/data.h>
|
||||
#include <xgboost/base.h>
|
||||
@@ -23,7 +24,7 @@ namespace xgboost {
|
||||
* \brief interface of evaluation metric used to evaluate model performance.
|
||||
* This has nothing to do with training, but merely serves evaluation purposes.
|
||||
*/
|
||||
class Metric {
|
||||
class Metric : public Configurable {
|
||||
protected:
|
||||
GenericParameter const* tparam_;
|
||||
|
||||
@@ -34,6 +35,21 @@ class Metric {
|
||||
*/
|
||||
virtual void Configure(
|
||||
const std::vector<std::pair<std::string, std::string> >& args) {}
|
||||
/*!
|
||||
* \brief Load configuration from JSON object
|
||||
* By default, metric has no internal configuration;
|
||||
* override this function to maintain internal configuration
|
||||
* \param in JSON object containing the configuration
|
||||
*/
|
||||
void LoadConfig(Json const& in) override {}
|
||||
/*!
|
||||
* \brief Save configuration to JSON object
|
||||
* By default, metric has no internal configuration;
|
||||
* override this function to maintain internal configuration
|
||||
* \param out pointer to output JSON object
|
||||
*/
|
||||
void SaveConfig(Json* out) const override {}
|
||||
|
||||
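For a metric that does carry internal state, a hedged sketch of overriding the two hooks (the class and its field are hypothetical, and the ``Eval`` signature is assumed from this interface):

.. code-block:: cpp

  #include <xgboost/json.h>
  #include <xgboost/metric.h>

  class TolerantError : public xgboost::Metric {  // hypothetical metric
    float threshold_ {0.5f};

   public:
    void SaveConfig(xgboost::Json* out) const override {
      (*out)["threshold"] = xgboost::Json{xgboost::Number{threshold_}};
    }
    void LoadConfig(xgboost::Json const& in) override {
      threshold_ = xgboost::get<xgboost::Number const>(in["threshold"]);
    }
    const char* Name() const override { return "tolerant-error"; }
    xgboost::bst_float Eval(const xgboost::HostDeviceVector<xgboost::bst_float>& preds,
                            const xgboost::MetaInfo& info, bool distributed) override {
      return 0.0f;  // real computation elided for the sketch
    }
  };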
/*!
|
||||
* \brief evaluate a specific metric
|
||||
* \param preds prediction
|
||||
@@ -48,12 +64,13 @@ class Metric {
|
||||
/*! \return name of metric */
|
||||
virtual const char* Name() const = 0;
|
||||
/*! \brief virtual destructor */
|
||||
virtual ~Metric() = default;
|
||||
~Metric() override = default;
|
||||
/*!
|
||||
* \brief create a metric according to name.
|
||||
* \param name name of the metric.
|
||||
* name can be in form metric[@]param
|
||||
* and the name will be matched in the registry.
|
||||
* name can be in form metric[@]param and the name will be matched in the
|
||||
* registry.
|
||||
* \param tparam A global generic parameter
|
||||
* \return the created metric.
|
||||
*/
|
||||
static Metric* Create(const std::string& name, GenericParameter const* tparam);
|
||||
|
||||
@@ -28,7 +28,7 @@ class ObjFunction : public Configurable {
|
||||
|
||||
public:
|
||||
/*! \brief virtual destructor */
|
||||
virtual ~ObjFunction() = default;
|
||||
~ObjFunction() override = default;
|
||||
/*!
|
||||
* \brief Configure the objective with the specified parameters.
|
||||
* \param args arguments to the objective function.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*!
|
||||
* Copyright 2018 by Contributors
|
||||
* \file enum_class_param.h
|
||||
* \file parameter.h
|
||||
* \brief macro for using C++11 enum class as DMLC parameter
|
||||
* \author Hyunsu Philip Cho
|
||||
*/
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*!
|
||||
* Copyright by Contributors
|
||||
* Copyright 2017-2020 by Contributors
|
||||
* \file predictor.h
|
||||
* \brief Interface of predictor,
|
||||
* performs predictions for a gradient booster.
|
||||
@@ -16,6 +16,7 @@
|
||||
#include <unordered_map>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
#include <mutex>
|
||||
|
||||
// Forward declarations
|
||||
namespace xgboost {
|
||||
@@ -32,47 +33,84 @@ namespace xgboost {
|
||||
* \brief Contains pointer to input matrix and associated cached predictions.
|
||||
*/
|
||||
struct PredictionCacheEntry {
|
||||
std::shared_ptr<DMatrix> data;
|
||||
// A storage for caching prediction values
|
||||
HostDeviceVector<bst_float> predictions;
|
||||
// The version of current cache, corresponding number of layers of trees
|
||||
uint32_t version { 0 };
|
||||
// A weak pointer for checking whether the DMatrix object has expired.
|
||||
std::weak_ptr< DMatrix > ref;
|
||||
|
||||
PredictionCacheEntry() = default;
|
||||
/* \brief Update the cache entry by number of versions.
|
||||
*
|
||||
* \param v Added versions.
|
||||
*/
|
||||
void Update(uint32_t v) {
|
||||
version += v;
|
||||
}
|
||||
};
|
||||
|
||||
/* \brief A container for managed prediction caches.
|
||||
*/
|
||||
class PredictionContainer {
|
||||
std::unordered_map<DMatrix *, PredictionCacheEntry> container_;
|
||||
void ClearExpiredEntries();
|
||||
std::mutex cache_lock_;
|
||||
|
||||
public:
|
||||
PredictionContainer() = default;
|
||||
/* \brief Add a new DMatrix to the cache, at the same time this function will clear out
|
||||
* all expired caches by checking the `std::weak_ptr`. Caching an existing
|
||||
* DMatrix won't renew it.
|
||||
*
|
||||
* Passing in a `shared_ptr` is critical here. First, the shared pointer is needed
|
||||
* to create a `weak_ptr` inside the entry. More importantly, the lifetime of this
|
||||
* cache is tied to the shared pointer.
|
||||
*
|
||||
* Another way to make a safe cache is to create a proxy to this entry, with another shared
|
||||
* pointer defined inside, and pass this proxy around instead of the real entry. But
|
||||
* that seems too messy. In XGBoost, functions like `UpdateOneIter` will have
|
||||
* (memory) safe access to the DMatrix as long as it's passed in as a `shared_ptr`.
|
||||
*
|
||||
* \param m shared pointer to the DMatrix that needs to be cached.
|
||||
* \param device Which device should the cache be allocated on. Pass
|
||||
* GenericParameter::kCpuId for CPU or positive integer for GPU id.
|
||||
*
|
||||
* \return the cache entry for passed in DMatrix, either an existing cache or newly
|
||||
* created.
|
||||
*/
|
||||
PredictionCacheEntry& Cache(std::shared_ptr<DMatrix> m, int32_t device);
|
||||
/* \brief Get a prediction cache entry. This entry must be already allocated by `Cache`
|
||||
* method. Otherwise a dmlc::Error is thrown.
|
||||
*
|
||||
* \param m pointer to the DMatrix.
|
||||
* \return The prediction cache for passed in DMatrix.
|
||||
*/
|
||||
PredictionCacheEntry& Entry(DMatrix* m);
|
||||
/* \brief Get a const reference to the underlying hash map. Clear expired caches before
|
||||
* returning.
|
||||
*/
|
||||
decltype(container_) const& Container();
|
||||
};
|
||||
|
||||
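A sketch of the intended call sequence (``dmat`` is assumed to be a ``std::shared_ptr<DMatrix>`` kept alive by the caller; ``GenericParameter::kCpuId`` selects CPU storage, as noted above):

.. code-block:: cpp

  #include <xgboost/predictor.h>

  xgboost::PredictionContainer container;
  // Register (or look up) the cache tied to this DMatrix's lifetime.
  auto& entry = container.Cache(dmat, xgboost::GenericParameter::kCpuId);
  entry.predictions.Resize(dmat->Info().num_row_, 0.0f);
  entry.Update(1);  // one new layer of trees was added

  // Later, the same entry can be fetched by raw pointer.
  auto& same = container.Entry(dmat.get());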
/**
|
||||
* \class Predictor
|
||||
*
|
||||
* \brief Performs prediction on individual training instances or batches of
|
||||
* instances for GBTree. The predictor also manages a prediction cache
|
||||
* associated with input matrices. If possible, it will use previously
|
||||
* calculated predictions instead of calculating new predictions.
|
||||
* Prediction functions all take a GBTreeModel and a DMatrix as input and
|
||||
* output a vector of predictions. The predictor does not modify any state of
|
||||
* the model itself.
|
||||
* \brief Performs prediction on individual training instances or batches of instances for
|
||||
* GBTree. Prediction functions all take a GBTreeModel and a DMatrix as input and
|
||||
* output a vector of predictions. The predictor does not modify any state of the
|
||||
* model itself.
|
||||
*/
|
||||
|
||||
class Predictor {
|
||||
protected:
|
||||
/*
|
||||
* \brief Runtime parameters.
|
||||
*/
|
||||
GenericParameter const* generic_param_;
|
||||
/**
|
||||
* \brief Map of matrices and associated cached predictions to facilitate
|
||||
* storing and looking up predictions.
|
||||
*/
|
||||
std::shared_ptr<std::unordered_map<DMatrix*, PredictionCacheEntry>> cache_;
|
||||
|
||||
std::unordered_map<DMatrix*, PredictionCacheEntry>::iterator FindCache(DMatrix const* dmat) {
|
||||
auto cache_entry = std::find_if(
|
||||
cache_->begin(), cache_->end(),
|
||||
[dmat](std::pair<DMatrix *, PredictionCacheEntry const &> const &kv) {
|
||||
return kv.second.data.get() == dmat;
|
||||
});
|
||||
return cache_entry;
|
||||
}
|
||||
|
||||
public:
|
||||
Predictor(GenericParameter const* generic_param,
|
||||
std::shared_ptr<std::unordered_map<DMatrix*, PredictionCacheEntry>> cache) :
|
||||
generic_param_{generic_param}, cache_{cache} {}
|
||||
explicit Predictor(GenericParameter const* generic_param) :
|
||||
generic_param_{generic_param} {}
|
||||
virtual ~Predictor() = default;
|
||||
|
||||
/**
|
||||
@@ -91,37 +129,25 @@ class Predictor {
|
||||
* \param model The model to predict from.
|
||||
* \param tree_begin The tree begin index.
|
||||
* \param ntree_limit (Optional) The ntree limit. 0 means do not
|
||||
* limit trees.
|
||||
* limit trees.
|
||||
*/
|
||||
|
||||
virtual void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
|
||||
virtual void PredictBatch(DMatrix* dmat, PredictionCacheEntry* out_preds,
|
||||
const gbm::GBTreeModel& model, int tree_begin,
|
||||
unsigned ntree_limit = 0) = 0;
|
||||
uint32_t const ntree_limit = 0) = 0;
|
||||
|
||||
/**
|
||||
* \fn virtual void Predictor::UpdatePredictionCache( const gbm::GBTreeModel
|
||||
* &model, std::vector<std::unique_ptr<TreeUpdater> >* updaters, int
|
||||
* num_new_trees) = 0;
|
||||
*
|
||||
* \brief Update the internal prediction cache using newly added trees. Will
|
||||
* use the tree updater to do this if possible. Should be called as a part of
|
||||
* the tree boosting process to facilitate the look up of predictions
|
||||
* at a later time.
|
||||
*
|
||||
* \param model The model.
|
||||
* \param [in,out] updaters The updater sequence for gradient boosting.
|
||||
* \param num_new_trees Number of new trees.
|
||||
* \brief Inplace prediction.
|
||||
* \param x Type erased data adapter.
|
||||
* \param model The model to predict from.
|
||||
* \param missing Missing value in the data.
|
||||
* \param [in,out] out_preds The output preds.
|
||||
* \param tree_begin (Optional) Beginning of boosted trees used for prediction.
|
||||
* \param tree_end (Optional) End of booster trees. 0 means do not limit trees.
|
||||
*/
|
||||
|
||||
virtual void UpdatePredictionCache(
|
||||
const gbm::GBTreeModel& model,
|
||||
std::vector<std::unique_ptr<TreeUpdater>>* updaters,
|
||||
int num_new_trees) = 0;
|
||||
|
||||
virtual void InplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model,
|
||||
float missing, PredictionCacheEntry *out_preds,
|
||||
uint32_t tree_begin = 0, uint32_t tree_end = 0) const = 0;
|
||||
/**
|
||||
* \fn virtual void Predictor::PredictInstance( const SparsePage::Inst&
|
||||
* inst, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model,
|
||||
*
|
||||
* \brief online prediction function, predict score for one instance at a time
|
||||
* NOTE: use the batch prediction interface if possible, batch prediction is
|
||||
* usually more efficient than online prediction This function is NOT
|
||||
@@ -197,11 +223,9 @@ class Predictor {
|
||||
*
|
||||
* \param name Name of the predictor.
|
||||
* \param generic_param Pointer to runtime parameters.
|
||||
* \param cache Pointer to prediction cache.
|
||||
*/
|
||||
static Predictor* Create(
|
||||
std::string const& name, GenericParameter const* generic_param,
|
||||
std::shared_ptr<std::unordered_map<DMatrix*, PredictionCacheEntry>> cache);
|
||||
std::string const& name, GenericParameter const* generic_param);
|
||||
};
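As a quick orientation for reviewers, a minimal sketch of a call site under the new factory signature; the registered name "cpu_predictor" and the header paths are assumptions, not part of this diff:

    #include <memory>

    #include <xgboost/generic_parameters.h>
    #include <xgboost/predictor.h>

    // Hypothetical helper: the prediction cache is no longer passed to Create().
    std::unique_ptr<xgboost::Predictor> MakeCpuPredictor(
        xgboost::GenericParameter const* generic_param) {
      return std::unique_ptr<xgboost::Predictor>{
          xgboost::Predictor::Create("cpu_predictor", generic_param)};
    }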

 /*!
@@ -209,9 +233,7 @@ class Predictor {
  */
 struct PredictorReg
     : public dmlc::FunctionRegEntryBase<
-          PredictorReg, std::function<Predictor*(
-                            GenericParameter const*,
-                            std::shared_ptr<std::unordered_map<DMatrix*, PredictionCacheEntry>>)>> {};
+          PredictorReg, std::function<Predictor*(GenericParameter const*)>> {};

 #define XGBOOST_REGISTER_PREDICTOR(UniqueId, Name)      \
   static DMLC_ATTRIBUTE_UNUSED ::xgboost::PredictorReg& \

@@ -29,11 +29,14 @@
 #ifndef XGBOOST_SPAN_H_
 #define XGBOOST_SPAN_H_

-#include <xgboost/logging.h>  // CHECK
+#include <xgboost/base.h>
+#include <xgboost/logging.h>

 #include <cinttypes>  // size_t
-#include <numeric>    // numeric_limits
+#include <limits>     // numeric_limits
 #include <iterator>
 #include <type_traits>
 #include <cstdio>

 /*!
  * The version number 1910 is picked up from GSL.
@@ -69,26 +72,33 @@ namespace xgboost {
 namespace common {

 // Usual logging facility is not available inside device code.
-// TODO(trivialfis): Make dmlc check more generic.
+// assert is not supported in mac as of CUDA 10.0
-#define KERNEL_CHECK(cond)                                       \
-  do {                                                           \
-    if (!(cond)) {                                               \
-      printf("\nKernel error:\n"                                 \
-             "In: %s: %d\n"                                      \
-             "\t%s\n\tExpecting: %s\n"                           \
-             "\tBlock: [%d, %d, %d], Thread: [%d, %d, %d]\n\n",  \
-             __FILE__, __LINE__, __PRETTY_FUNCTION__, #cond,     \
-             blockIdx.x, blockIdx.y, blockIdx.z,                 \
-             threadIdx.x, threadIdx.y, threadIdx.z);             \
-      asm("trap;");                                              \
-    }                                                            \
+#define KERNEL_CHECK(cond)                                                 \
+  do {                                                                     \
+    if (!(cond)) {                                                         \
+      printf("\nKernel error:\n"                                           \
+             "In: %s: %d\n"                                                \
+             "\t%s\n\tExpecting: %s\n"                                     \
+             "\tBlock: [%d, %d, %d], Thread: [%d, %d, %d]\n\n",            \
+             __FILE__, __LINE__, __PRETTY_FUNCTION__, #cond, blockIdx.x,   \
+             blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y,             \
+             threadIdx.z);                                                 \
+      asm("trap;");                                                        \
+    }                                                                      \
   } while (0);

-#ifdef __CUDA_ARCH__
+#if defined(__CUDA_ARCH__)
 #define SPAN_CHECK KERNEL_CHECK
-#else
+#elif defined(XGBOOST_STRICT_R_MODE) && XGBOOST_STRICT_R_MODE == 1  // R package
+#define SPAN_CHECK CHECK  // check from dmlc
+#else  // not CUDA, not R
 #define SPAN_CHECK(cond)                                            \
   do {                                                              \
     if (XGBOOST_EXPECT(!(cond), false)) {                           \
       fprintf(stderr, "[xgboost] Condition %s failed.\n", #cond);   \
       fflush(stderr); /* It seems stderr on Windows is buffered? */ \
       std::terminate();                                             \
     }                                                               \
   } while (0);
 #endif  // __CUDA_ARCH__

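To illustrate the three-way dispatch above, a hypothetical bounds-checked accessor; `At` is illustrative and not part of the header:

    // Device code: prints from the kernel and traps. R package: routes through
    // dmlc's CHECK. Plain C++: prints to stderr and terminates.
    template <typename T>
    XGBOOST_DEVICE T& At(T* data, std::size_t size, std::size_t i) {
      SPAN_CHECK(i < size);
      return data[i];
    }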
 namespace detail {
@@ -98,8 +108,9 @@ namespace detail {
  * represent ptrdiff_t, which is just int64_t. So we make it deterministic
  * here.
  */
-using ptrdiff_t = typename std::conditional<std::is_same<std::ptrdiff_t, std::int64_t>::value,
-                                            std::ptrdiff_t, std::int64_t>::type;
+using ptrdiff_t = typename std::conditional<  // NOLINT
+    std::is_same<std::ptrdiff_t, std::int64_t>::value,
+    std::ptrdiff_t, std::int64_t>::type;
 }  // namespace detail

 #if defined(_MSC_VER) && _MSC_VER < 1910
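By construction the alias is 64-bit on every platform, which a compile-time check can confirm (illustrative only, not in the header):

    static_assert(sizeof(xgboost::common::detail::ptrdiff_t) == sizeof(std::int64_t),
                  "detail::ptrdiff_t is pinned to a 64-bit signed type");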
@@ -129,7 +140,7 @@ class SpanIterator {
                  IsConst, const ElementType, ElementType>::type&;
   using pointer = typename std::add_pointer<reference>::type;  // NOLINT

-  XGBOOST_DEVICE constexpr SpanIterator() : span_{nullptr}, index_{0} {}
+  constexpr SpanIterator() = default;

   XGBOOST_DEVICE constexpr SpanIterator(
       const SpanType* _span,
@@ -236,8 +247,8 @@ class SpanIterator {
   }

  protected:
-  const SpanType *span_;
-  typename SpanType::index_type index_;
+  const SpanType *span_ { nullptr };
+  typename SpanType::index_type index_ { 0 };
 };


@@ -402,8 +413,7 @@ class Span {
   using const_reverse_iterator = const detail::SpanIterator<Span<T, Extent>, true>;  // NOLINT

   // constructors
-
-  XGBOOST_DEVICE constexpr Span() __span_noexcept : size_(0), data_(nullptr) {}
+  constexpr Span() __span_noexcept = default;

   XGBOOST_DEVICE Span(pointer _ptr, index_type _count) :
       size_(_count), data_(_ptr) {
@@ -496,11 +506,11 @@ class Span {

   // element access

-  XGBOOST_DEVICE reference front() const {
+  XGBOOST_DEVICE reference front() const {  // NOLINT
     return (*this)[0];
   }

-  XGBOOST_DEVICE reference back() const {
+  XGBOOST_DEVICE reference back() const {  // NOLINT
     return (*this)[size() - 1];
   }

@@ -580,8 +590,8 @@ class Span {
   }

  private:
-  index_type size_;
-  pointer data_;
+  index_type size_ { 0 };
+  pointer data_ { nullptr };
 };
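With size_ and data_ now default-initialized in class, the defaulted constructor yields a well-defined empty span. A small usage sketch, assuming the usual size()/data() accessors:

    inline void EmptySpanExample() {
      xgboost::common::Span<float> s;   // default-constructed: empty
      SPAN_CHECK(s.size() == 0);        // size_ { 0 }
      SPAN_CHECK(s.data() == nullptr);  // data_ { nullptr }
    }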

 template <class T, std::size_t X, class U, std::size_t Y>

@@ -22,6 +22,7 @@
 #include <cstring>
 #include <algorithm>
 #include <tuple>
+#include <stack>

 namespace xgboost {

@@ -65,6 +66,7 @@ struct TreeParam : public dmlc::Parameter<TreeParam> {
     DMLC_DECLARE_FIELD(num_nodes).set_lower_bound(1).set_default(1);
     DMLC_DECLARE_FIELD(num_feature)
         .describe("Number of features used in tree construction.");
+    DMLC_DECLARE_FIELD(num_deleted);
     DMLC_DECLARE_FIELD(size_leaf_vector).set_lower_bound(0).set_default(0)
         .describe("Size of leaf vector, reserved for vector tree");
   }
@@ -87,6 +89,10 @@ struct RTreeNodeStat {
   bst_float base_weight;
   /*! \brief number of child that is leaf node known up to now */
   int leaf_child_cnt {0};
+
+  RTreeNodeStat() = default;
+  RTreeNodeStat(float loss_chg, float sum_hess, float weight) :
+      loss_chg{loss_chg}, sum_hess{sum_hess}, base_weight{weight} {}
   bool operator==(const RTreeNodeStat& b) const {
     return loss_chg == b.loss_chg && sum_hess == b.sum_hess &&
            base_weight == b.base_weight && leaf_child_cnt == b.leaf_child_cnt;
@@ -99,13 +105,15 @@ struct RTreeNodeStat {
  */
 class RegTree : public Model {
  public:
  /*! \brief auxiliary statistics of node to help tree building */
  using SplitCondT = bst_float;
-  static constexpr int32_t kInvalidNodeId {-1};
+  static constexpr bst_node_t kInvalidNodeId {-1};
+  static constexpr uint32_t kDeletedNodeMarker = std::numeric_limits<uint32_t>::max();
+  static constexpr bst_node_t kRoot { 0 };

   /*! \brief tree node */
   class Node {
    public:
-    Node() {
+    XGBOOST_DEVICE Node() {
       // assert compact alignment
       static_assert(sizeof(Node) == 4 * sizeof(int) + sizeof(Info),
                     "Node: 64 bit align");
@@ -113,6 +121,7 @@ class RegTree : public Model {
     Node(int32_t cleft, int32_t cright, int32_t parent,
          uint32_t split_ind, float split_cond, bool default_left) :
         parent_{parent}, cleft_{cleft}, cright_{cright} {
+      this->SetParent(parent_);
       this->SetSplit(split_ind, split_cond, default_left);
     }

@@ -158,7 +167,7 @@ class RegTree : public Model {
     }
     /*! \brief whether this node is deleted */
     XGBOOST_DEVICE bool IsDeleted() const {
-      return sindex_ == std::numeric_limits<unsigned>::max();
+      return sindex_ == kDeletedNodeMarker;
     }
     /*! \brief whether current node is root */
     XGBOOST_DEVICE bool IsRoot() const { return parent_ == kInvalidNodeId; }
@@ -201,7 +210,7 @@ class RegTree : public Model {
     }
     /*! \brief mark that this node is deleted */
     XGBOOST_DEVICE void MarkDelete() {
-      this->sindex_ = std::numeric_limits<unsigned>::max();
+      this->sindex_ = kDeletedNodeMarker;
     }
     /*! \brief Reuse this deleted node. */
     XGBOOST_DEVICE void Reuse() {
@@ -318,6 +327,38 @@
     return nodes_ == b.nodes_ && stats_ == b.stats_ &&
            deleted_nodes_ == b.deleted_nodes_ && param == b.param;
   }
+  /* \brief Iterate through all nodes in this tree.
+   *
+   * \param func Function that accepts a node index, and returns false when iteration
+   *        should stop, otherwise returns true.
+   */
+  template <typename Func> void WalkTree(Func func) const {
+    std::stack<bst_node_t> nodes;
+    nodes.push(kRoot);
+    auto &self = *this;
+    while (!nodes.empty()) {
+      auto nidx = nodes.top();
+      nodes.pop();
+      if (!func(nidx)) {
+        return;
+      }
+      auto left = self[nidx].LeftChild();
+      auto right = self[nidx].RightChild();
+      if (left != RegTree::kInvalidNodeId) {
+        nodes.push(left);
+      }
+      if (right != RegTree::kInvalidNodeId) {
+        nodes.push(right);
+      }
+    }
+  }
+  /*!
+   * \brief Compares whether 2 trees are equal from a user's perspective. The equality
+   *        compares only non-deleted nodes.
+   *
+   * \param b The other tree.
+   */
+  bool Equal(const RegTree& b) const;
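A sketch of the new traversal in use, counting leaves much as the GetNumLeaves() declared later in this diff might; the helper itself is illustrative only:

    inline xgboost::bst_node_t CountLeaves(xgboost::RegTree const& tree) {
      xgboost::bst_node_t n_leaves = 0;
      tree.WalkTree([&](xgboost::bst_node_t nidx) {
        if (tree[nidx].IsLeaf()) {
          ++n_leaves;
        }
        return true;  // returning false would stop the walk early
      });
      return n_leaves;
    }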

   /**
    * \brief Expands a leaf node into two additional leaf nodes.
@@ -331,13 +372,16 @@
    * \param right_leaf_weight The right leaf weight for prediction, modified by learning rate.
    * \param loss_change The loss change.
    * \param sum_hess The sum hess.
-   * \param leaf_right_child The right child index of leaf, by default kInvalidNodeId,
-   *        some updaters use the right child index of leaf as a marker
+   * \param left_sum The sum hess of left leaf.
+   * \param right_sum The sum hess of right leaf.
+   * \param leaf_right_child The right child index of leaf, by default kInvalidNodeId,
+   *        some updaters use the right child index of leaf as a marker
    */
   void ExpandNode(int nid, unsigned split_index, bst_float split_value,
                   bool default_left, bst_float base_weight,
                   bst_float left_leaf_weight, bst_float right_leaf_weight,
-                  bst_float loss_change, float sum_hess,
+                  bst_float loss_change, float sum_hess, float left_sum,
+                  float right_sum,
                   bst_node_t leaf_right_child = kInvalidNodeId) {
     int pleft = this->AllocNode();
     int pright = this->AllocNode();
@@ -353,9 +397,9 @@
     nodes_[pleft].SetLeaf(left_leaf_weight, leaf_right_child);
     nodes_[pright].SetLeaf(right_leaf_weight, leaf_right_child);

-    this->Stat(nid).loss_chg = loss_change;
-    this->Stat(nid).base_weight = base_weight;
-    this->Stat(nid).sum_hess = sum_hess;
+    this->Stat(nid) = {loss_change, sum_hess, base_weight};
+    this->Stat(pleft) = {0.0f, left_sum, left_leaf_weight};
+    this->Stat(pright) = {0.0f, right_sum, right_leaf_weight};
   }

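A sketch of the widened ExpandNode() call: the two new trailing sums populate the child RTreeNodeStat entries directly. All values below are illustrative:

    inline void ExpandRootExample(xgboost::RegTree* tree) {
      tree->ExpandNode(/*nid=*/0, /*split_index=*/3, /*split_value=*/0.5f,
                       /*default_left=*/true, /*base_weight=*/0.0f,
                       /*left_leaf_weight=*/-0.1f, /*right_leaf_weight=*/0.2f,
                       /*loss_change=*/1.5f, /*sum_hess=*/10.0f,
                       /*left_sum=*/6.0f, /*right_sum=*/4.0f);
      // Stat(0) == {1.5f, 10.0f, 0.0f}; each child now carries its own hessian sum.
    }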
 /*!
@@ -392,6 +436,10 @@
     return param.num_nodes - 1 - param.num_deleted;
   }

+  /* \brief Count number of leaves in tree. */
+  bst_node_t GetNumLeaves() const;
+  bst_node_t GetNumSplitNodes() const;

   /*!
    * \brief dense feature vector that can be taken by RegTree
    * and can be constructed from sparse feature vector.
@@ -422,7 +470,7 @@
    * \param i feature index.
    * \return the i-th feature value
    */
-  bst_float Fvalue(size_t i) const;
+  bst_float GetFvalue(size_t i) const;
   /*!
    * \brief check whether i-th entry is missing
    * \param i feature index.
@@ -534,6 +582,13 @@
   // delete a tree node, keep the parent field to allow trace back
   void DeleteNode(int nid) {
     CHECK_GE(nid, 1);
+    auto pid = (*this)[nid].Parent();
+    if (nid == (*this)[pid].LeftChild()) {
+      (*this)[pid].SetLeftChild(kInvalidNodeId);
+    } else {
+      (*this)[pid].SetRightChild(kInvalidNodeId);
+    }
+
     deleted_nodes_.push_back(nid);
     nodes_[nid].MarkDelete();
     ++param.num_deleted;
@@ -548,16 +603,20 @@ inline void RegTree::FVec::Init(size_t size) {
 }

 inline void RegTree::FVec::Fill(const SparsePage::Inst& inst) {
-  for (bst_uint i = 0; i < inst.size(); ++i) {
-    if (inst[i].index >= data_.size()) continue;
-    data_[inst[i].index].fvalue = inst[i].fvalue;
+  for (auto const& entry : inst) {
+    if (entry.index >= data_.size()) {
+      continue;
+    }
+    data_[entry.index].fvalue = entry.fvalue;
   }
 }

 inline void RegTree::FVec::Drop(const SparsePage::Inst& inst) {
-  for (bst_uint i = 0; i < inst.size(); ++i) {
-    if (inst[i].index >= data_.size()) continue;
-    data_[inst[i].index].flag = -1;
+  for (auto const& entry : inst) {
+    if (entry.index >= data_.size()) {
+      continue;
+    }
+    data_[entry.index].flag = -1;
   }
 }

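Typical round trip for the rewritten helpers during prediction; `inst` is assumed to be one row taken from a SparsePage:

    inline float FirstFeatureOrZero(xgboost::SparsePage::Inst const& inst,
                                    size_t num_features) {
      xgboost::RegTree::FVec feats;
      feats.Init(num_features);  // every slot starts as missing
      feats.Fill(inst);          // densify one sparse row
      float v = feats.IsMissing(0) ? 0.0f : feats.GetFvalue(0);
      feats.Drop(inst);          // reset touched slots for the next row
      return v;
    }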
@@ -565,7 +624,7 @@ inline size_t RegTree::FVec::Size() const {
   return data_.size();
 }

-inline bst_float RegTree::FVec::Fvalue(size_t i) const {
+inline bst_float RegTree::FVec::GetFvalue(size_t i) const {
   return data_[i].fvalue;
 }

@@ -577,7 +636,7 @@ inline int RegTree::GetLeafIndex(const RegTree::FVec& feat) const {
   bst_node_t nid = 0;
   while (!(*this)[nid].IsLeaf()) {
     unsigned split_index = (*this)[nid].SplitIndex();
-    nid = this->GetNext(nid, feat.Fvalue(split_index), feat.IsMissing(split_index));
+    nid = this->GetNext(nid, feat.GetFvalue(split_index), feat.IsMissing(split_index));
   }
   return nid;
 }

@@ -34,12 +34,19 @@ class TreeUpdater : public Configurable {

  public:
   /*! \brief virtual destructor */
-  virtual ~TreeUpdater() = default;
+  ~TreeUpdater() override = default;
   /*!
    * \brief Initialize the updater with given arguments.
    * \param args arguments to the objective function.
    */
   virtual void Configure(const Args& args) = 0;
+  /*! \brief Whether this updater can be used for updating existing trees.
+   *
+   * Some updaters are used for building new trees (like `hist`), while some others are
+   * used for modifying existing trees (like `prune`). Return true if it can modify
+   * existing trees.
+   */
+  virtual bool CanModifyTree() const { return false; }
   /*!
    * \brief perform update to the tree models
    * \param gpair the gradient pair statistics of the data
@@ -73,6 +80,7 @@ class TreeUpdater : public Configurable {
   /*!
    * \brief Create a tree updater given name
    * \param name Name of the tree updater.
+   * \param tparam A global runtime parameter
    */
   static TreeUpdater* Create(const std::string& name, GenericParameter const* tparam);
 };

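A sketch of how a booster might use the new capability query before honoring an update-style process; the updater name "prune" is an assumption here, taken only from the comment above:

    inline bool UpdaterCanModify(xgboost::GenericParameter const* tparam) {
      std::unique_ptr<xgboost::TreeUpdater> up{
          xgboost::TreeUpdater::Create("prune", tparam)};
      return up->CanModifyTree();  // defaults to false for pure tree builders
    }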
Some files were not shown because too many files have changed in this diff.