Compare commits
120 Commits
release_0.
...
v0.82
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3f83dcd502 | ||
|
|
0c1d5f1120 | ||
|
|
92b7577c62 | ||
|
|
9fefa2128d | ||
|
|
7ea5675679 | ||
|
|
74009afcac | ||
|
|
1b7405f688 | ||
|
|
dc2add96c5 | ||
|
|
8e0a08fbcf | ||
|
|
54793544a2 | ||
|
|
2aaae2e7bb | ||
|
|
cecbe0cf71 | ||
|
|
c8c472f39a | ||
|
|
1dac5e2410 | ||
|
|
a985a99cf0 | ||
|
|
0ff84d950e | ||
|
|
60f05352c5 | ||
|
|
549c8d6ae9 | ||
|
|
e1240413c9 | ||
|
|
2e618af743 | ||
|
|
71a604fae3 | ||
|
|
1fe874e58a | ||
|
|
ff2d4c99fa | ||
|
|
754fe8142b | ||
|
|
37ddfd7d6e | ||
|
|
d506a8bc63 | ||
|
|
c18a3660fa | ||
|
|
3be1b9ae30 | ||
|
|
9b917cda4f | ||
|
|
99a290489c | ||
|
|
3320a52192 | ||
|
|
ba584e5e9f | ||
|
|
2a9b085bc8 | ||
|
|
f8ca2960fc | ||
|
|
05243642bb | ||
|
|
017c97b8ce | ||
|
|
325b16bccd | ||
|
|
ae3bb9c2d5 | ||
|
|
8905df4a18 | ||
|
|
1088dff42c | ||
|
|
7a652a8c64 | ||
|
|
59f868bc60 | ||
|
|
0d0ce32908 | ||
|
|
a60e224484 | ||
|
|
e0094d996e | ||
|
|
a1c35cadf0 | ||
|
|
4fac9874e0 | ||
|
|
301cef4638 | ||
|
|
1fc37e4749 | ||
|
|
0f8af85f64 | ||
|
|
5f151c5cf3 | ||
|
|
dade7c3aff | ||
|
|
773ddbcfcb | ||
|
|
e290ec9a80 | ||
|
|
6a569b8cd9 | ||
|
|
55bc149efb | ||
|
|
431c850c03 | ||
|
|
1f022929f4 | ||
|
|
f368d0de2b | ||
|
|
15fe2f1e7c | ||
|
|
be948df23f | ||
|
|
9897b5042f | ||
|
|
7735252925 | ||
|
|
85939c6a6e | ||
|
|
f75a21af25 | ||
|
|
84c99f86f4 | ||
|
|
c055a32609 | ||
|
|
c8c7b9649c | ||
|
|
a2dc929598 | ||
|
|
42bf90eb8f | ||
|
|
e0a279114e | ||
|
|
fd722d60cd | ||
|
|
53f695acf2 | ||
|
|
3d81c48d3f | ||
|
|
84a3af8dc0 | ||
|
|
4be5edaf92 | ||
|
|
93f9ce9ef9 | ||
|
|
9af6b689d6 | ||
|
|
4f26053b09 | ||
|
|
48dddfd635 | ||
|
|
a9d684db18 | ||
|
|
c5f92df475 | ||
|
|
c5130e487a | ||
|
|
9c4ff50e83 | ||
|
|
42cac4a30b | ||
|
|
f9302a56fb | ||
|
|
7d3149a21f | ||
|
|
86aac98e54 | ||
|
|
e9ab4a1c6c | ||
|
|
dc2bfbfde1 | ||
|
|
7ebe8dcf5b | ||
|
|
973fc8b1ff | ||
|
|
93f63324e6 | ||
|
|
aa48b7e903 | ||
|
|
0cd326c1bc | ||
|
|
3a150742c7 | ||
|
|
0a0d4239d3 | ||
|
|
fe999bf968 | ||
|
|
2ea0f887c1 | ||
|
|
c76d993681 | ||
|
|
a2a8954659 | ||
|
|
7af0946ac1 | ||
|
|
143475b27b | ||
|
|
926eb651fe | ||
|
|
daf77ca7b7 | ||
|
|
97984f4890 | ||
|
|
0ddb8a7661 | ||
|
|
d810e6dec9 | ||
|
|
be0bb7dd90 | ||
|
|
e38d5a6831 | ||
|
|
828d75714d | ||
|
|
ad6e0d55f1 | ||
|
|
19ee0a3579 | ||
|
|
2b045aa805 | ||
|
|
d9642cf757 | ||
|
|
1bf4083dc6 | ||
|
|
20d5abf919 | ||
|
|
f1275f52c1 | ||
|
|
1698fe64bb | ||
|
|
91cc14ea70 |
@@ -1,4 +1,4 @@
|
||||
Checks: 'modernize-*,-modernize-make-*,-modernize-raw-string-literal,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
|
||||
Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
|
||||
CheckOptions:
|
||||
- { key: readability-identifier-naming.ClassCase, value: CamelCase }
|
||||
- { key: readability-identifier-naming.StructCase, value: CamelCase }
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -91,3 +91,4 @@ lib/
|
||||
metastore_db
|
||||
|
||||
plugin/updater_gpu/test/cpp/data
|
||||
/include/xgboost/build_config.h
|
||||
|
||||
@@ -6,9 +6,7 @@ os:
|
||||
- linux
|
||||
- osx
|
||||
|
||||
osx_image: xcode8
|
||||
|
||||
group: deprecated-2017Q4
|
||||
osx_image: xcode9.3
|
||||
|
||||
# Use Build Matrix to do lint and build separately
|
||||
env:
|
||||
@@ -68,6 +66,11 @@ addons:
|
||||
- g++-4.8
|
||||
- gcc-7
|
||||
- g++-7
|
||||
homebrew:
|
||||
packages:
|
||||
- gcc@7
|
||||
- graphviz
|
||||
update: true
|
||||
|
||||
before_install:
|
||||
- source dmlc-core/scripts/travis/travis_setup_env.sh
|
||||
|
||||
130
CMakeLists.txt
130
CMakeLists.txt
@@ -8,17 +8,23 @@ set_default_configuration_release()
|
||||
msvc_use_static_runtime()
|
||||
|
||||
# Options
|
||||
option(USE_CUDA "Build with GPU acceleration")
|
||||
option(JVM_BINDINGS "Build JVM bindings" OFF)
|
||||
option(GOOGLE_TEST "Build google tests" OFF)
|
||||
option(R_LIB "Build shared library for R package" OFF)
|
||||
## GPUs
|
||||
option(USE_CUDA "Build with GPU acceleration" OFF)
|
||||
option(USE_NCCL "Build with multiple GPUs support" OFF)
|
||||
set(GPU_COMPUTE_VER "" CACHE STRING
|
||||
"Space separated list of compute versions to be built against, e.g. '35 61'")
|
||||
|
||||
## Bindings
|
||||
option(JVM_BINDINGS "Build JVM bindings" OFF)
|
||||
option(R_LIB "Build shared library for R package" OFF)
|
||||
|
||||
## Devs
|
||||
option(USE_SANITIZER "Use santizer flags" OFF)
|
||||
option(SANITIZER_PATH "Path to sanitizes.")
|
||||
set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
|
||||
"Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
|
||||
address, leak and thread.")
|
||||
option(GOOGLE_TEST "Build google tests" OFF)
|
||||
|
||||
# Plugins
|
||||
option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
|
||||
@@ -49,6 +55,26 @@ if(WIN32 AND MINGW)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-libstdc++")
|
||||
endif()
|
||||
|
||||
# Check existence of software pre-fetching
|
||||
include(CheckCXXSourceCompiles)
|
||||
check_cxx_source_compiles("
|
||||
#include <xmmintrin.h>
|
||||
int main() {
|
||||
char data = 0;
|
||||
const char* address = &data;
|
||||
_mm_prefetch(address, _MM_HINT_NTA);
|
||||
return 0;
|
||||
}
|
||||
" XGBOOST_MM_PREFETCH_PRESENT)
|
||||
check_cxx_source_compiles("
|
||||
int main() {
|
||||
char data = 0;
|
||||
const char* address = &data;
|
||||
__builtin_prefetch(address, 0, 0);
|
||||
return 0;
|
||||
}
|
||||
" XGBOOST_BUILTIN_PREFETCH_PRESENT)
|
||||
|
||||
# Sanitizer
|
||||
if(USE_SANITIZER)
|
||||
include(cmake/Sanitizer.cmake)
|
||||
@@ -82,6 +108,12 @@ include_directories (
|
||||
${PROJECT_SOURCE_DIR}/rabit/include
|
||||
)
|
||||
|
||||
# Generate configurable header
|
||||
set(CMAKE_LOCAL "${PROJECT_SOURCE_DIR}/cmake")
|
||||
set(INCLUDE_ROOT "${PROJECT_SOURCE_DIR}/include")
|
||||
message(STATUS "${CMAKE_LOCAL}/build_config.h.in -> ${INCLUDE_ROOT}/xgboost/build_config.h")
|
||||
configure_file("${CMAKE_LOCAL}/build_config.h.in" "${INCLUDE_ROOT}/xgboost/build_config.h")
|
||||
|
||||
file(GLOB_RECURSE SOURCES
|
||||
src/*.cc
|
||||
src/*.h
|
||||
@@ -91,8 +123,6 @@ file(GLOB_RECURSE SOURCES
|
||||
# Only add main function for executable target
|
||||
list(REMOVE_ITEM SOURCES ${PROJECT_SOURCE_DIR}/src/cli_main.cc)
|
||||
|
||||
file(GLOB_RECURSE TEST_SOURCES "tests/cpp/*.cc")
|
||||
|
||||
file(GLOB_RECURSE CUDA_SOURCES
|
||||
src/*.cu
|
||||
src/*.cuh
|
||||
@@ -108,7 +138,7 @@ if(PLUGIN_DENSE_PARSER)
|
||||
endif()
|
||||
|
||||
# rabit
|
||||
# TODO: Create rabit cmakelists.txt
|
||||
# TODO: Use CMakeLists.txt from rabit.
|
||||
set(RABIT_SOURCES
|
||||
rabit/src/allreduce_base.cc
|
||||
rabit/src/allreduce_robust.cc
|
||||
@@ -119,6 +149,7 @@ set(RABIT_EMPTY_SOURCES
|
||||
rabit/src/engine_empty.cc
|
||||
rabit/src/c_api.cc
|
||||
)
|
||||
|
||||
if(MINGW OR R_LIB)
|
||||
# build a dummy rabit library
|
||||
add_library(rabit STATIC ${RABIT_EMPTY_SOURCES})
|
||||
@@ -126,7 +157,11 @@ else()
|
||||
add_library(rabit STATIC ${RABIT_SOURCES})
|
||||
endif()
|
||||
|
||||
if(USE_CUDA)
|
||||
if (GENERATE_COMPILATION_DATABASE)
|
||||
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
|
||||
endif (GENERATE_COMPILATION_DATABASE)
|
||||
|
||||
if(USE_CUDA AND (NOT GENERATE_COMPILATION_DATABASE))
|
||||
find_package(CUDA 8.0 REQUIRED)
|
||||
cmake_minimum_required(VERSION 3.5)
|
||||
|
||||
@@ -136,7 +171,7 @@ if(USE_CUDA)
|
||||
|
||||
if(USE_NCCL)
|
||||
find_package(Nccl REQUIRED)
|
||||
include_directories(${NCCL_INCLUDE_DIR})
|
||||
cuda_include_directories(${NCCL_INCLUDE_DIR})
|
||||
add_definitions(-DXGBOOST_USE_NCCL)
|
||||
endif()
|
||||
|
||||
@@ -156,6 +191,39 @@ if(USE_CUDA)
|
||||
target_link_libraries(gpuxgboost ${NCCL_LIB_NAME})
|
||||
endif()
|
||||
list(APPEND LINK_LIBRARIES gpuxgboost)
|
||||
|
||||
elseif (USE_CUDA AND GENERATE_COMPILATION_DATABASE)
|
||||
# Enable CUDA language to generate a compilation database.
|
||||
cmake_minimum_required(VERSION 3.8)
|
||||
|
||||
find_package(CUDA 8.0 REQUIRED)
|
||||
enable_language(CUDA)
|
||||
set(CMAKE_CUDA_COMPILER clang++)
|
||||
set(CUDA_SEPARABLE_COMPILATION ON)
|
||||
if (NOT CLANG_CUDA_GENCODE)
|
||||
set(CLANG_CUDA_GENCODE "--cuda-gpu-arch=sm_35")
|
||||
endif (NOT CLANG_CUDA_GENCODE)
|
||||
set(CMAKE_CUDA_FLAGS " -Wno-deprecated ${CLANG_CUDA_GENCODE} -fPIC ${GENCODE} -std=c++11 -x cuda")
|
||||
message(STATUS "CMAKE_CUDA_FLAGS: ${CMAKE_CUDA_FLAGS}")
|
||||
|
||||
add_library(gpuxgboost STATIC ${CUDA_SOURCES})
|
||||
|
||||
if(USE_NCCL)
|
||||
find_package(Nccl REQUIRED)
|
||||
target_include_directories(gpuxgboost PUBLIC ${NCCL_INCLUDE_DIR})
|
||||
target_compile_definitions(gpuxgboost PUBLIC -DXGBOOST_USE_NCCL)
|
||||
target_link_libraries(gpuxgboost PUBLIC ${NCCL_LIB_NAME})
|
||||
endif()
|
||||
|
||||
target_compile_definitions(gpuxgboost PUBLIC -DXGBOOST_USE_CUDA)
|
||||
# A hack for CMake to make arguments valid for clang++
|
||||
string(REPLACE "-x cu" "-x cuda" CMAKE_CUDA_COMPILE_PTX_COMPILATION
|
||||
${CMAKE_CUDA_COMPILE_PTX_COMPILATION})
|
||||
string(REPLACE "-x cu" "-x cuda" CMAKE_CUDA_COMPILE_WHOLE_COMPILATION
|
||||
${CMAKE_CUDA_COMPILE_WHOLE_COMPILATION})
|
||||
string(REPLACE "-x cu" "-x cuda" CMAKE_CUDA_COMPILE_SEPARABLE_COMPILATION
|
||||
${CMAKE_CUDA_COMPILE_SEPARABLE_COMPILATION})
|
||||
target_include_directories(gpuxgboost PUBLIC cub)
|
||||
endif()
|
||||
|
||||
|
||||
@@ -171,7 +239,6 @@ endif()
|
||||
|
||||
add_library(objxgboost OBJECT ${SOURCES})
|
||||
|
||||
|
||||
# building shared library for R package
|
||||
if(R_LIB)
|
||||
find_package(LibR REQUIRED)
|
||||
@@ -179,13 +246,13 @@ if(R_LIB)
|
||||
list(APPEND LINK_LIBRARIES "${LIBR_CORE_LIBRARY}")
|
||||
MESSAGE(STATUS "LIBR_CORE_LIBRARY " ${LIBR_CORE_LIBRARY})
|
||||
|
||||
include_directories(
|
||||
# Shared library target for the R package
|
||||
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
|
||||
include_directories(xgboost
|
||||
"${LIBR_INCLUDE_DIRS}"
|
||||
"${PROJECT_SOURCE_DIR}"
|
||||
)
|
||||
|
||||
# Shared library target for the R package
|
||||
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
|
||||
target_link_libraries(xgboost ${LINK_LIBRARIES})
|
||||
# R uses no lib prefix in shared library names of its packages
|
||||
set_target_properties(xgboost PROPERTIES PREFIX "")
|
||||
@@ -197,7 +264,7 @@ if(R_LIB)
|
||||
# use a dummy location for any other remaining installs
|
||||
set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/dummy_inst")
|
||||
|
||||
# main targets: shared library & exe
|
||||
# main targets: shared library & exe
|
||||
else()
|
||||
# Executable
|
||||
add_executable(runxgboost $<TARGET_OBJECTS:objxgboost> src/cli_main.cc)
|
||||
@@ -220,20 +287,20 @@ else()
|
||||
add_dependencies(xgboost runxgboost)
|
||||
endif()
|
||||
|
||||
|
||||
# JVM
|
||||
if(JVM_BINDINGS)
|
||||
find_package(JNI QUIET REQUIRED)
|
||||
|
||||
include_directories(${JNI_INCLUDE_DIRS} jvm-packages/xgboost4j/src/native)
|
||||
|
||||
add_library(xgboost4j SHARED
|
||||
$<TARGET_OBJECTS:objxgboost>
|
||||
jvm-packages/xgboost4j/src/native/xgboost4j.cpp)
|
||||
set_output_directory(xgboost4j ${PROJECT_SOURCE_DIR}/lib)
|
||||
$<TARGET_OBJECTS:objxgboost>
|
||||
jvm-packages/xgboost4j/src/native/xgboost4j.cpp)
|
||||
target_include_directories(xgboost4j
|
||||
PRIVATE ${JNI_INCLUDE_DIRS}
|
||||
PRIVATE jvm-packages/xgboost4j/src/native)
|
||||
target_link_libraries(xgboost4j
|
||||
${LINK_LIBRARIES}
|
||||
${JAVA_JVM_LIBRARY})
|
||||
${LINK_LIBRARIES}
|
||||
${JAVA_JVM_LIBRARY})
|
||||
set_output_directory(xgboost4j ${PROJECT_SOURCE_DIR}/lib)
|
||||
endif()
|
||||
|
||||
|
||||
@@ -242,18 +309,31 @@ if(GOOGLE_TEST)
|
||||
enable_testing()
|
||||
find_package(GTest REQUIRED)
|
||||
|
||||
file(GLOB_RECURSE TEST_SOURCES "tests/cpp/*.cc")
|
||||
auto_source_group("${TEST_SOURCES}")
|
||||
include_directories(${GTEST_INCLUDE_DIRS})
|
||||
|
||||
if(USE_CUDA)
|
||||
if(USE_CUDA AND (NOT GENERATE_COMPILATION_DATABASE))
|
||||
file(GLOB_RECURSE CUDA_TEST_SOURCES "tests/cpp/*.cu")
|
||||
cuda_include_directories(${GTEST_INCLUDE_DIRS})
|
||||
cuda_compile(CUDA_TEST_OBJS ${CUDA_TEST_SOURCES})
|
||||
elseif (USE_CUDA AND GENERATE_COMPILATION_DATABASE)
|
||||
file(GLOB_RECURSE CUDA_TEST_SOURCES "tests/cpp/*.cu")
|
||||
else()
|
||||
set(CUDA_TEST_OBJS "")
|
||||
endif()
|
||||
|
||||
add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_OBJS} $<TARGET_OBJECTS:objxgboost>)
|
||||
if (USE_CUDA AND GENERATE_COMPILATION_DATABASE)
|
||||
add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_SOURCES}
|
||||
$<TARGET_OBJECTS:objxgboost>)
|
||||
target_include_directories(testxgboost PRIVATE cub)
|
||||
else ()
|
||||
add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_OBJS}
|
||||
$<TARGET_OBJECTS:objxgboost>)
|
||||
endif ()
|
||||
|
||||
set_output_directory(testxgboost ${PROJECT_SOURCE_DIR})
|
||||
target_include_directories(testxgboost
|
||||
PRIVATE ${GTEST_INCLUDE_DIRS})
|
||||
target_link_libraries(testxgboost ${GTEST_LIBRARIES} ${LINK_LIBRARIES})
|
||||
|
||||
add_test(TestXGBoost testxgboost)
|
||||
|
||||
@@ -85,4 +85,6 @@ List of Contributors
|
||||
* [Andrew Thia](https://github.com/BlueTea88)
|
||||
- Andrew Thia implemented feature interaction constraints
|
||||
* [Wei Tian](https://github.com/weitian)
|
||||
* [Chen Qin] (https://github.com/chenqin)
|
||||
* [Chen Qin](https://github.com/chenqin)
|
||||
* [Sam Wilkinson](https://samwilkinson.io)
|
||||
* [Matthew Jones](https://github.com/mt-jones)
|
||||
|
||||
27
Jenkinsfile
vendored
27
Jenkinsfile
vendored
@@ -53,7 +53,7 @@ pipeline {
|
||||
parallel (buildMatrix.findAll{it['enabled']}.collectEntries{ c ->
|
||||
def buildName = utils.getBuildName(c)
|
||||
utils.buildFactory(buildName, c, false, this.&buildPlatformCmake)
|
||||
})
|
||||
} + [ "clang-tidy" : { buildClangTidyJob() } ])
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -73,7 +73,7 @@ def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
|
||||
}
|
||||
def test_suite = conf["withGpu"] ? (conf["multiGpu"] ? "mgpu" : "gpu") : "cpu"
|
||||
// Build node - this is returned result
|
||||
retry(3) {
|
||||
retry(1) {
|
||||
node(nodeReq) {
|
||||
unstash name: 'srcs'
|
||||
echo """
|
||||
@@ -96,13 +96,32 @@ def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
|
||||
# Test the wheel for compatibility on a barebones CPU container
|
||||
${dockerRun} release ${dockerArgs} bash -c " \
|
||||
pip install --user python-package/dist/xgboost-*-none-any.whl && \
|
||||
python -m nose -v tests/python"
|
||||
pytest -v --fulltrace -s tests/python"
|
||||
# Test the wheel for compatibility on CUDA 10.0 container
|
||||
${dockerRun} gpu --build-arg CUDA_VERSION=10.0 bash -c " \
|
||||
pip install --user python-package/dist/xgboost-*-none-any.whl && \
|
||||
python -m nose -v --eval-attr='(not slow) and (not mgpu)' tests/python-gpu"
|
||||
pytest -v -s --fulltrace -m '(not mgpu) and (not slow)' tests/python-gpu"
|
||||
"""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Run a clang-tidy job on a GPU machine
|
||||
*/
|
||||
def buildClangTidyJob() {
|
||||
def nodeReq = "linux && gpu && unrestricted"
|
||||
node(nodeReq) {
|
||||
unstash name: 'srcs'
|
||||
echo "Running clang-tidy job..."
|
||||
// Invoke command inside docker
|
||||
// Install Google Test and Python yaml
|
||||
dockerTarget = "clang_tidy"
|
||||
dockerArgs = "--build-arg CUDA_VERSION=9.2"
|
||||
sh """
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/clang_tidy.sh
|
||||
"""
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -56,7 +56,7 @@ pipeline {
|
||||
stage('Jenkins: Build doc') {
|
||||
steps {
|
||||
script {
|
||||
retry(3) {
|
||||
retry(1) {
|
||||
node('linux && cpu && restricted') {
|
||||
unstash name: 'srcs'
|
||||
echo 'Building doc...'
|
||||
@@ -99,7 +99,7 @@ def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
|
||||
dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
|
||||
}
|
||||
// Build node - this is returned result
|
||||
retry(3) {
|
||||
retry(1) {
|
||||
node(nodeReq) {
|
||||
unstash name: 'srcs'
|
||||
echo """
|
||||
|
||||
3
Makefile
3
Makefile
@@ -260,7 +260,8 @@ Rpack: clean_all
|
||||
cp ./LICENSE xgboost
|
||||
cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' | sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.in
|
||||
cp xgboost/src/Makevars.in xgboost/src/Makevars.win
|
||||
sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CFLAGS\)/g' xgboost/src/Makevars.win
|
||||
sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CXXFLAGS\)/g' xgboost/src/Makevars.win
|
||||
sed -i -e 's/-pthread/$$\(SHLIB_PTHREAD_FLAGS\)/g' xgboost/src/Makevars.win
|
||||
bash R-package/remove_warning_suppression_pragma.sh
|
||||
rm xgboost/remove_warning_suppression_pragma.sh
|
||||
|
||||
|
||||
172
NEWS.md
172
NEWS.md
@@ -3,6 +3,165 @@ XGBoost Change Log
|
||||
|
||||
This file records the changes in xgboost library in reverse chronological order.
|
||||
|
||||
## v0.82 (2019.03.03)
|
||||
This release is packed with many new features and bug fixes.
|
||||
|
||||
### Roadmap: better performance scaling for multi-core CPUs (#3957)
|
||||
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #3957 marks an important step toward better performance scaling, by using software pre-fetching and replacing STL vectors with C-style arrays. Special thanks to @Laurae2 and @SmirnovEgorRu.
|
||||
* See #3810 for latest progress on this roadmap.
|
||||
|
||||
### New feature: Distributed Fast Histogram Algorithm (`hist`) (#4011, #4102, #4140, #4128)
|
||||
* It is now possible to run the `hist` algorithm in distributed setting. Special thanks to @CodingCat. The benefits include:
|
||||
1. Faster local computation via feature binning
|
||||
2. Support for monotonic constraints and feature interaction constraints
|
||||
3. Simpler codebase than `approx`, allowing for future improvement
|
||||
* Depth-wise tree growing is now performed in a separate code path, so that cross-node synchronization is performed only once per level.
|
||||
|
||||
### New feature: Multi-Node, Multi-GPU training (#4095)
|
||||
* Distributed training is now able to utilize clusters equipped with NVIDIA GPUs. In particular, the rabit AllReduce layer will communicate GPU device information. Special thanks to @mt-jones, @RAMitchell, @rongou, @trivialfis, @canonizer, and @jeffdk.
|
||||
* Resource management systems will be able to assign a rank for each GPU in the cluster.
|
||||
* In Dask, users will be able to construct a collection of XGBoost processes over an inhomogeneous device cluster (i.e. workers with different number and/or kinds of GPUs).
|
||||
|
||||
### New feature: Multiple validation datasets in XGBoost4J-Spark (#3904, #3910)
|
||||
* You can now track the performance of the model during training with multiple evaluation datasets. By specifying `eval_sets` or call `setEvalSets` over a `XGBoostClassifier` or `XGBoostRegressor`, you can pass in multiple evaluation datasets typed as a `Map` from `String` to `DataFrame`. Special thanks to @CodingCat.
|
||||
* See the usage of multiple validation datasets [here](https://github.com/dmlc/xgboost/blob/0c1d5f1120c0a159f2567b267f0ec4ffadee00d0/jvm-packages/xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/SparkTraining.scala#L66-L78)
|
||||
|
||||
### New feature: Additional metric functions for GPUs (#3952)
|
||||
* Element-wise metrics have been ported to GPU: `rmse`, `mae`, `logloss`, `poisson-nloglik`, `gamma-deviance`, `gamma-nloglik`, `error`, `tweedie-nloglik`. Special thanks to @trivialfis and @RAMitchell.
|
||||
* With supported metrics, XGBoost will select the correct devices based on your system and `n_gpus` parameter.
|
||||
|
||||
### New feature: Column sampling at individual nodes (splits) (#3971)
|
||||
* Columns (features) can now be sampled at individual tree nodes, in addition to per-tree and per-level sampling. To enable per-node sampling, set `colsample_bynode` parameter, which represents the fraction of columns sampled at each node. This parameter is set to 1.0 by default (i.e. no sampling per node). Special thanks to @canonizer.
|
||||
* The `colsample_bynode` parameter works cumulatively with other `colsample_by*` parameters: for example, `{'colsample_bynode':0.5, 'colsample_bytree':0.5}` with 100 columns will give 25 features to choose from at each split.
|
||||
|
||||
### Major API change: consistent logging level via `verbosity` (#3982, #4002, #4138)
|
||||
* XGBoost now allows fine-grained control over logging. You can set `verbosity` to 0 (silent), 1 (warning), 2 (info), and 3 (debug). This is useful for controlling the amount of logging outputs. Special thanks to @trivialfis.
|
||||
* Parameters `silent` and `debug_verbose` are now deprecated.
|
||||
* Note: Sometimes XGBoost tries to change configurations based on heuristics, which is displayed as warning message. If there's unexpected behaviour, please try to increase value of verbosity.
|
||||
|
||||
### Major bug fix: external memory (#4040, #4193)
|
||||
* Clarify object ownership in multi-threaded prefetcher, to avoid memory error.
|
||||
* Correctly merge two column batches (which uses [CSC layout](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_(CSC_or_CCS))).
|
||||
* Add unit tests for external memory.
|
||||
* Special thanks to @trivialfis and @hcho3.
|
||||
|
||||
### Major bug fix: early stopping fixed in XGBoost4J and XGBoost4J-Spark (#3928, #4176)
|
||||
* Early stopping in XGBoost4J and XGBoost4J-Spark is now consistent with its counterpart in the Python package. Training stops if the current iteration is `earlyStoppingSteps` away from the best iteration. If there are multiple evaluation sets, only the last one is used to determine early stopping.
|
||||
* See the updated documentation [here](https://xgboost.readthedocs.io/en/release_0.82/jvm/xgboost4j_spark_tutorial.html#early-stopping)
|
||||
* Special thanks to @CodingCat, @yanboliang, and @mingyang.
|
||||
|
||||
### Major bug fix: infrequent features should not crash distributed training (#4045)
|
||||
* For infrequently occurring features, some partitions may not get any instance. This scenario used to crash distributed training due to mal-formed ranges. The problem has now been fixed.
|
||||
* In practice, one-hot-encoded categorical variables tend to produce rare features, particularly when the cardinality is high.
|
||||
* Special thanks to @CodingCat.
|
||||
|
||||
### Performance improvements
|
||||
* Faster, more space-efficient radix sorting in `gpu_hist` (#3895)
|
||||
* Subtraction trick in histogram calculation in `gpu_hist` (#3945)
|
||||
* More performant re-partition in XGBoost4J-Spark (#4049)
|
||||
|
||||
### Bug-fixes
|
||||
* Fix semantics of `gpu_id` when running multiple XGBoost processes on a multi-GPU machine (#3851)
|
||||
* Fix page storage path for external memory on Windows (#3869)
|
||||
* Fix configuration setup so that DART utilizes GPU (#4024)
|
||||
* Eliminate NAN values from SHAP prediction (#3943)
|
||||
* Prevent empty quantile sketches in `hist` (#4155)
|
||||
* Enable running objectives with 0 GPU (#3878)
|
||||
* Parameters are no longer dependent on system locale (#3891, #3907)
|
||||
* Use consistent data type in the GPU coordinate descent code (#3917)
|
||||
* Remove undefined behavior in the CLI config parser on the ARM platform (#3976)
|
||||
* Initialize counters in GPU AllReduce (#3987)
|
||||
* Prevent deadlocks in GPU AllReduce (#4113)
|
||||
* Load correct values from sliced NumPy arrays (#4147, #4165)
|
||||
* Fix incorrect GPU device selection (#4161)
|
||||
* Make feature binning logic in `hist` aware of query groups when running a ranking task (#4115). For ranking task, query groups are weighted, not individual instances.
|
||||
* Generate correct C++ exception type for `LOG(FATAL)` macro (#4159)
|
||||
* Python package
|
||||
- Python package should run on system without `PATH` environment variable (#3845)
|
||||
- Fix `coef_` and `intercept_` signature to be compatible with `sklearn.RFECV` (#3873)
|
||||
- Use UTF-8 encoding in Python package README, to support non-English locale (#3867)
|
||||
- Add AUC-PR to list of metrics to maximize for early stopping (#3936)
|
||||
- Allow loading pickles without `self.booster` attribute, for backward compatibility (#3938, #3944)
|
||||
- White-list DART for feature importances (#4073)
|
||||
- Update usage of [h2oai/datatable](https://github.com/h2oai/datatable) (#4123)
|
||||
* XGBoost4J-Spark
|
||||
- Address scalability issue in prediction (#4033)
|
||||
- Enforce the use of per-group weights for ranking task (#4118)
|
||||
- Fix vector size of `rawPredictionCol` in `XGBoostClassificationModel` (#3932)
|
||||
- More robust error handling in Spark tracker (#4046, #4108)
|
||||
- Fix return type of `setEvalSets` (#4105)
|
||||
- Return correct value of `getMaxLeaves` (#4114)
|
||||
|
||||
### API changes
|
||||
* Add experimental parameter `single_precision_histogram` to use single-precision histograms for the `gpu_hist` algorithm (#3965)
|
||||
* Python package
|
||||
- Add option to select type of feature importances in the scikit-learn interface (#3876)
|
||||
- Add `trees_to_df()` method to dump decision trees as Pandas data frame (#4153)
|
||||
- Add options to control node shapes in the GraphViz plotting function (#3859)
|
||||
- Add `xgb_model` option to `XGBClassifier`, to load previously saved model (#4092)
|
||||
- Passing lists into `DMatrix` is now deprecated (#3970)
|
||||
* XGBoost4J
|
||||
- Support multiple feature importance features (#3801)
|
||||
|
||||
### Maintenance: Refactor C++ code for legibility and maintainability
|
||||
* Refactor `hist` algorithm code and add unit tests (#3836)
|
||||
* Minor refactoring of split evaluator in `gpu_hist` (#3889)
|
||||
* Removed unused leaf vector field in the tree model (#3989)
|
||||
* Simplify the tree representation by combining `TreeModel` and `RegTree` classes (#3995)
|
||||
* Simplify and harden tree expansion code (#4008, #4015)
|
||||
* De-duplicate parameter classes in the linear model algorithms (#4013)
|
||||
* Robust handling of ranges with C++20 span in `gpu_exact` and `gpu_coord_descent` (#4020, #4029)
|
||||
* Simplify tree training code (#3825). Also use Span class for robust handling of ranges.
|
||||
|
||||
### Maintenance: testing, continuous integration, build system
|
||||
* Disallow `std::regex` since it's not supported by GCC 4.8.x (#3870)
|
||||
* Add multi-GPU tests for coordinate descent algorithm for linear models (#3893, #3974)
|
||||
* Enforce naming style in Python lint (#3896)
|
||||
* Refactor Python tests (#3897, #3901): Use pytest exclusively, display full trace upon failure
|
||||
* Address `DeprecationWarning` when using Python collections (#3909)
|
||||
* Use correct group for maven site plugin (#3937)
|
||||
* Jenkins CI is now using on-demand EC2 instances exclusively, due to unreliability of Spot instances (#3948)
|
||||
* Better GPU performance logging (#3945)
|
||||
* Fix GPU tests on machines with only 1 GPU (#4053)
|
||||
* Eliminate CRAN check warnings and notes (#3988)
|
||||
* Add unit tests for tree serialization (#3989)
|
||||
* Add unit tests for tree fitting functions in `hist` (#4155)
|
||||
* Add a unit test for `gpu_exact` algorithm (#4020)
|
||||
* Correct JVM CMake GPU flag (#4071)
|
||||
* Fix failing Travis CI on Mac (#4086)
|
||||
* Speed up Jenkins by not compiling CMake (#4099)
|
||||
* Analyze C++ and CUDA code using clang-tidy, as part of Jenkins CI pipeline (#4034)
|
||||
* Fix broken R test: Install Homebrew GCC (#4142)
|
||||
* Check for empty datasets in GPU unit tests (#4151)
|
||||
* Fix Windows compilation (#4139)
|
||||
* Comply with latest convention of cpplint (#4157)
|
||||
* Fix a unit test in `gpu_hist` (#4158)
|
||||
* Speed up data generation in Python tests (#4164)
|
||||
|
||||
### Usability Improvements
|
||||
* Add link to [InfoWorld 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html) (#4116)
|
||||
* Remove outdated AWS YARN tutorial (#3885)
|
||||
* Document current limitation in number of features (#3886)
|
||||
* Remove unnecessary warning when `gblinear` is selected (#3888)
|
||||
* Document limitation of CSV parser: header not supported (#3934)
|
||||
* Log training parameters in XGBoost4J-Spark (#4091)
|
||||
* Clarify early stopping behavior in the scikit-learn interface (#3967)
|
||||
* Clarify behavior of `max_depth` parameter (#4078)
|
||||
* Revise Python docstrings for ranking task (#4121). In particular, weights must be per-group in learning-to-rank setting.
|
||||
* Document parameter `num_parallel_tree` (#4022)
|
||||
* Add Jenkins status badge (#4090)
|
||||
* Warn users against using internal functions of `Booster` object (#4066)
|
||||
* Reformat `benchmark_tree.py` to comply with Python style convention (#4126)
|
||||
* Clarify a comment in `objectiveTrait` (#4174)
|
||||
* Fix typos and broken links in documentation (#3890, #3872, #3902, #3919, #3975, #4027, #4156, #4167)
|
||||
|
||||
### Acknowledgement
|
||||
**Contributors** (in no particular order): Jiaming Yuan (@trivialfis), Hyunsu Cho (@hcho3), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Yanbo Liang (@yanboliang), Andy Adinets (@canonizer), Tong He (@hetong007), Yuan Tang (@terrytangyuan)
|
||||
|
||||
**First-time Contributors** (in no particular order): Jelle Zijlstra (@JelleZijlstra), Jiacheng Xu (@jiachengxu), @ajing, Kashif Rasul (@kashif), @theycallhimavi, Joey Gao (@pjgao), Prabakaran Kumaresshan (@nixphix), Huafeng Wang (@huafengw), @lyxthe, Sam Wilkinson (@scwilkinson), Tatsuhito Kato (@stabacov), Shayak Banerjee (@shayakbanerjee), Kodi Arfer (@Kodiologist), @KyleLi1985, Egor Smirnov (@SmirnovEgorRu), @tmitanitky, Pasha Stetsenko (@st-pasha), Kenichi Nagahara (@keni-chi), Abhai Kollara Dilip (@abhaikollara), Patrick Ford (@pford221), @hshujuan, Matthew Jones (@mt-jones), Thejaswi Rao (@teju85), Adam November (@anovember)
|
||||
|
||||
**First-time Reviewers** (in no particular order): Mingyang Hu (@mingyang), Theodore Vasiloudis (@thvasilo), Jakub Troszok (@troszok), Rong Ou (@rongou), @Denisevi4, Matthew Jones (@mt-jones), Jeff Kaplan (@jeffdk)
|
||||
|
||||
## v0.81 (2018.11.04)
|
||||
### New feature: feature interaction constraints
|
||||
* Users are now able to control which features (independent variables) are allowed to interact by specifying feature interaction constraints (#3466).
|
||||
@@ -23,6 +182,10 @@ This file records the changes in xgboost library in reverse chronological order.
|
||||
* Mitigate tracker "thundering herd" issue on large cluster. Add exponential backoff retry when workers connect to tracker.
|
||||
* With this change, we were able to scale to 1.5k executors on a 12 billion row dataset after some tweaks here and there.
|
||||
|
||||
### New feature: Additional objective functions for GPUs
|
||||
* New objective functions ported to GPU: `hinge`, `multi:softmax`, `multi:softprob`, `count:poisson`, `reg:gamma`, `"reg:tweedie`.
|
||||
* With supported objectives, XGBoost will select the correct devices based on your system and `n_gpus` parameter.
|
||||
|
||||
### Major bug fix: learning to rank with XGBoost4J-Spark
|
||||
* Previously, `repartitionForData` would shuffle data and lose ordering necessary for ranking task.
|
||||
* To fix this issue, data points within each RDD partition is explicitly group by their group (query session) IDs (#3654). Also handle empty RDD partition carefully (#3750).
|
||||
@@ -33,6 +196,7 @@ This file records the changes in xgboost library in reverse chronological order.
|
||||
|
||||
### API changes
|
||||
* Column sampling by level (`colsample_bylevel`) is now functional for `hist` algorithm (#3635, #3862)
|
||||
* GPU tag `gpu:` for regression objectives are now deprecated. XGBoost will select the correct devices automatically (#3643)
|
||||
* Add `disable_default_eval_metric` parameter to disable default metric (#3606)
|
||||
* Experimental AVX support for gradient computation is removed (#3752)
|
||||
* XGBoost4J-Spark
|
||||
@@ -159,7 +323,7 @@ This file records the changes in xgboost library in reverse chronological order.
|
||||
### Acknowledgement
|
||||
**Contributors** (in no particular order): Hyunsu Cho (@hcho3), Jiaming Yuan (@trivialfis), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Andy Adinets (@canonizer), Vadim Khotilovich (@khotilov), Sergei Lebedev (@superbobry)
|
||||
|
||||
**First-time Contributors** (in no particular order): Matthew Tovbin (@tovbinm), Jakob Richter (@jakob-r), Grace Lam (@grace-lam), Grant W Schneider (@grantschneider), Andrew Thia (@BlueTea88), Sergei Chipiga (@schipiga), Joseph Bradley (@jkbradley), Chen Qin (@chenqin), Jerry Lin (@linjer), Dmitriy Rybalko (@rdtft), Michael Mui (@mmui), Takahiro Kojima (@515hikaru), Bruce Zhao (@BruceZhaoR), Wei Tian (@weitian), Saumya Bhatnagar (@Sam1301), Juzer Shakir (@JuzerShakir), Zhao Hang (@cleghom), Jonathan Friedman (@jontonsoup), Bruno Tremblay (@meztez), @Shiki-H, @mrgutkun, @gorogm, @htgeis, @jakehoare, @zengxy, @KOLANICH
|
||||
**First-time Contributors** (in no particular order): Matthew Tovbin (@tovbinm), Jakob Richter (@jakob-r), Grace Lam (@grace-lam), Grant W Schneider (@grantschneider), Andrew Thia (@BlueTea88), Sergei Chipiga (@schipiga), Joseph Bradley (@jkbradley), Chen Qin (@chenqin), Jerry Lin (@linjer), Dmitriy Rybalko (@rdtft), Michael Mui (@mmui), Takahiro Kojima (@515hikaru), Bruce Zhao (@BruceZhaoR), Wei Tian (@weitian), Saumya Bhatnagar (@Sam1301), Juzer Shakir (@JuzerShakir), Zhao Hang (@cleghom), Jonathan Friedman (@jontonsoup), Bruno Tremblay (@meztez), Boris Filippov (@frenzykryger), @Shiki-H, @mrgutkun, @gorogm, @htgeis, @jakehoare, @zengxy, @KOLANICH
|
||||
|
||||
**First-time Reviewers** (in no particular order): Nikita Titov (@StrikerRUS), Xiangrui Meng (@mengxr), Nirmal Borah (@Nirmal-Neel)
|
||||
|
||||
@@ -174,7 +338,7 @@ This file records the changes in xgboost library in reverse chronological order.
|
||||
- Latest master: https://xgboost.readthedocs.io/en/latest
|
||||
- 0.80 stable: https://xgboost.readthedocs.io/en/release_0.80
|
||||
- 0.72 stable: https://xgboost.readthedocs.io/en/release_0.72
|
||||
* Ranking task now uses instance weights (#3379)
|
||||
* Support for per-group weights in ranking objective (#3379)
|
||||
* Fix inaccurate decimal parsing (#3546)
|
||||
* New functionality
|
||||
- Query ID column support in LIBSVM data files (#2749). This is convenient for performing ranking task in distributed setting.
|
||||
@@ -334,7 +498,7 @@ This version is only applicable for the Python package. The content is identical
|
||||
- Compatibility fix for Python 2.6
|
||||
- Call `print_evaluation` callback at last iteration
|
||||
- Use appropriate integer types when calling native code, to prevent truncation and memory error
|
||||
- Fix shared library loading on Mac OS X
|
||||
- Fix shared library loading on Mac OS X
|
||||
* R package:
|
||||
- New parameters:
|
||||
- `silent` in `xgb.DMatrix()`
|
||||
@@ -375,7 +539,7 @@ This version is only applicable for the Python package. The content is identical
|
||||
- Support instance weights
|
||||
- Use `SparkParallelismTracker` to prevent jobs from hanging forever
|
||||
- Expose train-time evaluation metrics via `XGBoostModel.summary`
|
||||
- Option to specify `host-ip` explicitly in the Rabit tracker
|
||||
- Option to specify `host-ip` explicitly in the Rabit tracker
|
||||
* Documentation
|
||||
- Better math notation for gradient boosting
|
||||
- Updated build instructions for Mac OS X
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
#' a tree's median absolute leaf weight changes through the iterations.
|
||||
#'
|
||||
#' This function was inspired by the blog post
|
||||
#' \url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}.
|
||||
#' \url{https://github.com/aysent/random-forest-leaf-visualization}.
|
||||
#'
|
||||
#' @return
|
||||
#'
|
||||
|
||||
4
R-package/configure
vendored
4
R-package/configure
vendored
@@ -1667,12 +1667,12 @@ OPENMP_CXXFLAGS=""
|
||||
|
||||
if test `uname -s` = "Linux"
|
||||
then
|
||||
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
|
||||
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
|
||||
fi
|
||||
|
||||
if test `uname -s` = "Darwin"
|
||||
then
|
||||
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
|
||||
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
|
||||
ac_pkg_openmp=no
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether OpenMP will work in a package" >&5
|
||||
$as_echo_n "checking whether OpenMP will work in a package... " >&6; }
|
||||
|
||||
@@ -8,12 +8,12 @@ OPENMP_CXXFLAGS=""
|
||||
|
||||
if test `uname -s` = "Linux"
|
||||
then
|
||||
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
|
||||
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
|
||||
fi
|
||||
|
||||
if test `uname -s` = "Darwin"
|
||||
then
|
||||
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
|
||||
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
|
||||
ac_pkg_openmp=no
|
||||
AC_MSG_CHECKING([whether OpenMP will work in a package])
|
||||
AC_LANG_CONFTEST(
|
||||
|
||||
@@ -33,7 +33,7 @@ evalerror <- function(preds, dtrain) {
|
||||
return(list(metric = "error", value = err))
|
||||
}
|
||||
|
||||
param <- list(max_depth=2, eta=1, nthread = 2, silent=1,
|
||||
param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
|
||||
objective=logregobj, eval_metric=evalerror)
|
||||
print ('start training with user customized objective')
|
||||
# training with customized objective, we can also do step by step training
|
||||
@@ -57,7 +57,7 @@ logregobjattr <- function(preds, dtrain) {
|
||||
hess <- preds * (1 - preds)
|
||||
return(list(grad = grad, hess = hess))
|
||||
}
|
||||
param <- list(max_depth=2, eta=1, nthread = 2, silent=1,
|
||||
param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
|
||||
objective=logregobjattr, eval_metric=evalerror)
|
||||
print ('start training with user customized objective, with additional attributes in DMatrix')
|
||||
# training with customized objective, we can also do step by step training
|
||||
|
||||
@@ -7,7 +7,7 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
|
||||
# note: for customized objective function, we leave objective as default
|
||||
# note: what we are getting is margin value in prediction
|
||||
# you must know what you are doing
|
||||
param <- list(max_depth=2, eta=1, nthread = 2, silent=1)
|
||||
param <- list(max_depth=2, eta=1, nthread=2, verbosity=0)
|
||||
watchlist <- list(eval = dtest)
|
||||
num_round <- 20
|
||||
# user define objective function, given prediction, return gradient and second order gradient
|
||||
@@ -32,9 +32,9 @@ evalerror <- function(preds, dtrain) {
|
||||
}
|
||||
print ('start training with early Stopping setting')
|
||||
|
||||
bst <- xgb.train(param, dtrain, num_round, watchlist,
|
||||
bst <- xgb.train(param, dtrain, num_round, watchlist,
|
||||
objective = logregobj, eval_metric = evalerror, maximize = FALSE,
|
||||
early_stopping_round = 3)
|
||||
bst <- xgb.cv(param, dtrain, num_round, nfold = 5,
|
||||
bst <- xgb.cv(param, dtrain, num_round, nfold = 5,
|
||||
objective = logregobj, eval_metric = evalerror,
|
||||
maximize = FALSE, early_stopping_rounds = 3)
|
||||
|
||||
@@ -50,7 +50,7 @@ per tree with respect to tree number are created. And \code{which="med.weight"}
|
||||
a tree's median absolute leaf weight changes through the iterations.
|
||||
|
||||
This function was inspired by the blog post
|
||||
\url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}.
|
||||
\url{https://github.com/aysent/random-forest-leaf-visualization}.
|
||||
}
|
||||
\examples{
|
||||
|
||||
|
||||
@@ -17,8 +17,8 @@ endif
|
||||
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
|
||||
|
||||
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
|
||||
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ $(SHLIB_PTHREAD_FLAGS)
|
||||
PKG_LIBS = @OPENMP_CXXFLAGS@ $(SHLIB_PTHREAD_FLAGS)
|
||||
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ -pthread
|
||||
PKG_LIBS = @OPENMP_CXXFLAGS@ -pthread
|
||||
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
|
||||
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
|
||||
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
|
||||
|
||||
@@ -29,8 +29,8 @@ endif
|
||||
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
|
||||
|
||||
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
|
||||
PKG_CXXFLAGS= $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
|
||||
PKG_LIBS = $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
|
||||
PKG_CXXFLAGS= $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
|
||||
PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
|
||||
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
|
||||
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
|
||||
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/* Copyright (c) 2015 by Contributors
|
||||
*
|
||||
*
|
||||
* This file was initially generated using the following R command:
|
||||
* tools::package_native_routine_registration_skeleton('.', con = 'src/init.c', character_only = F)
|
||||
* and edited to conform to xgboost C linter requirements. For details, see
|
||||
@@ -10,7 +10,7 @@
|
||||
#include <stdlib.h>
|
||||
#include <R_ext/Rdynload.h>
|
||||
|
||||
/* FIXME:
|
||||
/* FIXME:
|
||||
Check these declarations against the C/Fortran source code.
|
||||
*/
|
||||
|
||||
@@ -70,7 +70,7 @@ static const R_CallMethodDef CallEntries[] = {
|
||||
|
||||
#if defined(_WIN32)
|
||||
__declspec(dllexport)
|
||||
#endif
|
||||
#endif // defined(_WIN32)
|
||||
void R_init_xgboost(DllInfo *dll) {
|
||||
R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
|
||||
R_useDynamicSymbols(dll, FALSE);
|
||||
|
||||
@@ -32,7 +32,10 @@ extern "C" {
|
||||
|
||||
namespace xgboost {
|
||||
ConsoleLogger::~ConsoleLogger() {
|
||||
dmlc::CustomLogMessage::Log(log_stream_.str());
|
||||
if (cur_verbosity_ == LogVerbosity::kIgnore ||
|
||||
cur_verbosity_ <= global_verbosity_) {
|
||||
dmlc::CustomLogMessage::Log(log_stream_.str());
|
||||
}
|
||||
}
|
||||
TrackerLogger::~TrackerLogger() {
|
||||
dmlc::CustomLogMessage::Log(log_stream_.str());
|
||||
@@ -46,10 +49,11 @@ namespace common {
|
||||
bool CheckNAN(double v) {
|
||||
return ISNAN(v);
|
||||
}
|
||||
#if !defined(XGBOOST_USE_CUDA)
|
||||
double LogGamma(double v) {
|
||||
return lgammafn(v);
|
||||
}
|
||||
|
||||
#endif // !defined(XGBOOST_USE_CUDA)
|
||||
// customize random engine.
|
||||
void CustomGlobalRandomEngine::seed(CustomGlobalRandomEngine::result_type val) {
|
||||
// ignore the seed
|
||||
|
||||
@@ -81,6 +81,39 @@ test_that("predict feature interactions works", {
|
||||
expect_lt(max(abs(intr - gt_intr)), 0.1)
|
||||
})
|
||||
|
||||
test_that("SHAP contribution values are not NAN", {
|
||||
d <- data.frame(
|
||||
x1 = c(-2.3, 1.4, 5.9, 2, 2.5, 0.3, -3.6, -0.2, 0.5, -2.8, -4.6, 3.3, -1.2,
|
||||
-1.1, -2.3, 0.4, -1.5, -0.2, -1, 3.7),
|
||||
x2 = c(291.179171, 269.198331, 289.942097, 283.191669, 269.673332,
|
||||
294.158346, 287.255835, 291.530838, 285.899586, 269.290833,
|
||||
268.649586, 291.530841, 280.074593, 269.484168, 293.94042,
|
||||
294.327506, 296.20709, 295.441669, 283.16792, 270.227085),
|
||||
y = c(9, 15, 5.7, 9.2, 22.4, 5, 9, 3.2, 7.2, 13.1, 7.8, 16.9, 6.5, 22.1,
|
||||
5.3, 10.4, 11.1, 13.9, 11, 20.5),
|
||||
fold = c(2, 2, 2, 1, 2, 2, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2))
|
||||
|
||||
ivs <- c("x1", "x2")
|
||||
|
||||
fit <- xgboost(
|
||||
verbose = 0,
|
||||
params = list(
|
||||
objective = "reg:linear",
|
||||
eval_metric = "rmse"),
|
||||
data = as.matrix(subset(d, fold == 2)[, ivs]),
|
||||
label = subset(d, fold == 2)$y,
|
||||
nthread = 1,
|
||||
nrounds = 3)
|
||||
|
||||
shaps <- as.data.frame(predict(fit,
|
||||
newdata = as.matrix(subset(d, fold == 1)[, ivs]),
|
||||
predcontrib = T))
|
||||
result <- cbind(shaps, sum = rowSums(shaps), pred = predict(fit,
|
||||
newdata = as.matrix(subset(d, fold == 1)[, ivs])))
|
||||
|
||||
expect_true(identical(TRUE, all.equal(result$sum, result$pred, tol = 1e-6)))
|
||||
})
|
||||
|
||||
|
||||
test_that("multiclass feature interactions work", {
|
||||
dm <- xgb.DMatrix(as.matrix(iris[,-5]), label=as.numeric(iris$Species)-1)
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
<img src=https://raw.githubusercontent.com/dmlc/dmlc.github.io/master/img/logo-m/xgboost.png width=135/> eXtreme Gradient Boosting
|
||||
===========
|
||||
[](https://xgboost-ci.net/blue/organizations/jenkins/xgboost/activity)
|
||||
[](https://travis-ci.org/dmlc/xgboost)
|
||||
[](https://ci.appveyor.com/project/tqchen/xgboost)
|
||||
[](https://xgboost.readthedocs.org)
|
||||
|
||||
@@ -48,7 +48,7 @@
|
||||
#include "../src/tree/tree_model.cc"
|
||||
#include "../src/tree/tree_updater.cc"
|
||||
#include "../src/tree/updater_colmaker.cc"
|
||||
#include "../src/tree/updater_fast_hist.cc"
|
||||
#include "../src/tree/updater_quantile_hist.cc"
|
||||
#include "../src/tree/updater_prune.cc"
|
||||
#include "../src/tree/updater_refresh.cc"
|
||||
#include "../src/tree/updater_sync.cc"
|
||||
|
||||
@@ -44,12 +44,12 @@ install:
|
||||
- set DO_PYTHON=off
|
||||
- if /i "%target%" == "mingw" set DO_PYTHON=on
|
||||
- if /i "%target%_%ver%_%configuration%" == "msvc_2015_Release" set DO_PYTHON=on
|
||||
- if /i "%DO_PYTHON%" == "on" conda install -y numpy scipy pandas matplotlib nose scikit-learn graphviz python-graphviz
|
||||
- if /i "%DO_PYTHON%" == "on" conda install -y numpy scipy pandas matplotlib pytest scikit-learn graphviz python-graphviz
|
||||
# R: based on https://github.com/krlmlr/r-appveyor
|
||||
- ps: |
|
||||
if($env:target -eq 'rmingw' -or $env:target -eq 'rmsvc') {
|
||||
#$ErrorActionPreference = "Stop"
|
||||
Invoke-WebRequest http://raw.github.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "$Env:TEMP\appveyor-tool.ps1"
|
||||
Invoke-WebRequest https://raw.githubusercontent.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "$Env:TEMP\appveyor-tool.ps1"
|
||||
Import-Module "$Env:TEMP\appveyor-tool.ps1"
|
||||
Bootstrap
|
||||
$DEPS = "c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','knitr','rmarkdown')"
|
||||
@@ -96,7 +96,7 @@ build_script:
|
||||
|
||||
test_script:
|
||||
- cd %APPVEYOR_BUILD_FOLDER%
|
||||
- if /i "%DO_PYTHON%" == "on" python -m nose tests/python
|
||||
- if /i "%DO_PYTHON%" == "on" python -m pytest tests/python
|
||||
# mingw R package: run the R check (which includes unit tests), and also keep the built binary package
|
||||
- if /i "%target%" == "rmingw" (
|
||||
set _R_CHECK_CRAN_INCOMING_=FALSE&&
|
||||
|
||||
11
cmake/build_config.h.in
Normal file
11
cmake/build_config.h.in
Normal file
@@ -0,0 +1,11 @@
|
||||
/*!
|
||||
* Copyright 2019 by Contributors
|
||||
* \file build_config.h
|
||||
*/
|
||||
#ifndef XGBOOST_BUILD_CONFIG_H_
|
||||
#define XGBOOST_BUILD_CONFIG_H_
|
||||
|
||||
#cmakedefine XGBOOST_MM_PREFETCH_PRESENT
|
||||
#cmakedefine XGBOOST_BUILTIN_PREFETCH_PRESENT
|
||||
|
||||
#endif // XGBOOST_BUILD_CONFIG_H_
|
||||
@@ -135,6 +135,7 @@ Send a PR to add a one sentence description:)
|
||||
|
||||
## Awards
|
||||
- [John Chambers Award](http://stat-computing.org/awards/jmc/winners.html) - 2016 Winner: XGBoost R Package, by Tong He (Simon Fraser University) and Tianqi Chen (University of Washington)
|
||||
- [InfoWorld’s 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html)
|
||||
|
||||
## Windows Binaries
|
||||
Unofficial windows binaries and instructions on how to use them are hosted on [Guido Tapia's blog](http://www.picnet.com.au/blogs/guido/post/2016/09/22/xgboost-windows-x64-binaries-for-download/)
|
||||
|
||||
@@ -62,7 +62,7 @@ test:data = "agaricus.txt.test"
|
||||
We use the tree booster and logistic regression objective in our setting. This indicates that we accomplish our task using classic gradient boosting regression tree (GBRT), which is a promising method for binary classification.
|
||||
|
||||
The parameters shown in the example give the most common ones that are needed to use xgboost.
|
||||
If you are interested in more parameter settings, the complete parameter settings and detailed descriptions are [here](../../doc/parameter.md). Besides putting the parameters in the configuration file, we can set them by passing them as arguments as below:
|
||||
If you are interested in more parameter settings, the complete parameter settings and detailed descriptions are [here](../../doc/parameter.rst). Besides putting the parameters in the configuration file, we can set them by passing them as arguments as below:
|
||||
|
||||
```
|
||||
../../xgboost mushroom.conf max_depth=6
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
Benckmark for Otto Group Competition
|
||||
Benchmark for Otto Group Competition
|
||||
=========
|
||||
|
||||
This is a folder containing the benchmark for the [Otto Group Competition on Kaggle](http://www.kaggle.com/c/otto-group-product-classification-challenge).
|
||||
@@ -20,5 +20,3 @@ devtools::install_github('tqchen/xgboost',subdir='R-package')
|
||||
```
|
||||
|
||||
Windows users may need to install [RTools](http://cran.r-project.org/bin/windows/Rtools/) first.
|
||||
|
||||
|
||||
|
||||
Submodule dmlc-core updated: 4d49691f1a...ac983092ee
@@ -176,7 +176,7 @@ In a *sparse* matrix, cells containing `0` are not stored in memory. Therefore,
|
||||
We will train decision tree model using the following parameters:
|
||||
|
||||
* `objective = "binary:logistic"`: we will train a binary classification model ;
|
||||
* `max.deph = 2`: the trees won't be deep, because our case is very simple ;
|
||||
* `max.depth = 2`: the trees won't be deep, because our case is very simple ;
|
||||
* `nthread = 2`: the number of cpu threads we are going to use;
|
||||
* `nrounds = 2`: there will be two passes on the data, the second one will enhance the model by further reducing the difference between ground truth and prediction.
|
||||
|
||||
@@ -576,8 +576,8 @@ print(class(rawVec))
|
||||
bst3 <- xgb.load(rawVec)
|
||||
pred3 <- predict(bst3, test$data)
|
||||
|
||||
# pred2 should be identical to pred
|
||||
print(paste("sum(abs(pred3-pred))=", sum(abs(pred2-pred))))
|
||||
# pred3 should be identical to pred
|
||||
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3-pred))))
|
||||
```
|
||||
|
||||
```
|
||||
|
||||
@@ -19,6 +19,7 @@ Everyone is more than welcome to contribute. It is a way to make the project bet
|
||||
* `Documents`_
|
||||
* `Testcases`_
|
||||
* `Sanitizers`_
|
||||
* `clang-tidy`_
|
||||
* `Examples`_
|
||||
* `Core Library`_
|
||||
* `Python Package`_
|
||||
@@ -169,6 +170,31 @@ environment variable:
|
||||
|
||||
For details, please consult `official documentation <https://github.com/google/sanitizers/wiki>`_ for sanitizers.
|
||||
|
||||
**********
|
||||
clang-tidy
|
||||
**********
|
||||
To run clang-tidy on both C++ and CUDA source code, run the following command
|
||||
from the top level source tree:
|
||||
|
||||
.. code-block:: bash
|
||||
cd /path/to/xgboost/
|
||||
python3 tests/ci_build/tidy.py --gtest-path=/path/to/google-test
|
||||
|
||||
The script requires the full path of Google Test library via the ``--gtest-path`` argument.
|
||||
|
||||
Also, the script accepts two optional integer arguments, namely ``--cpp`` and ``--cuda``.
|
||||
By default they are both set to 1. If you want to exclude CUDA source from
|
||||
linting, use:
|
||||
|
||||
.. code-block:: bash
|
||||
cd /path/to/xgboost/
|
||||
python3 tests/ci_build/tidy.py --cuda=0
|
||||
|
||||
Similarly, if you want to exclude C++ source from linting:
|
||||
|
||||
.. code-block:: bash
|
||||
cd /path/to/xgboost/
|
||||
python3 tests/ci_build/tidy.py --cpp=0
|
||||
|
||||
********
|
||||
Examples
|
||||
|
||||
@@ -18,7 +18,7 @@ Tree construction (training) and prediction can be accelerated with CUDA-capable
|
||||
|
||||
Usage
|
||||
=====
|
||||
Specify the ``tree_method`` parameter as one of the following algorithms.
|
||||
Specify the ``tree_method`` parameter as one of the following algorithms.
|
||||
|
||||
Algorithms
|
||||
----------
|
||||
@@ -31,39 +31,43 @@ Algorithms
|
||||
| gpu_hist | Equivalent to the XGBoost fast histogram algorithm. Much faster and uses considerably less memory. NOTE: Will run very slowly on GPUs older than Pascal architecture. |
|
||||
+--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
|
||||
Supported parameters
|
||||
Supported parameters
|
||||
--------------------
|
||||
|
||||
.. |tick| unicode:: U+2714
|
||||
.. |cross| unicode:: U+2718
|
||||
.. |tick| unicode:: U+2714
|
||||
.. |cross| unicode:: U+2718
|
||||
|
||||
+--------------------------+---------------+--------------+
|
||||
| parameter | ``gpu_exact`` | ``gpu_hist`` |
|
||||
+==========================+===============+==============+
|
||||
| ``subsample`` | |cross| | |tick| |
|
||||
+--------------------------+---------------+--------------+
|
||||
| ``colsample_bytree`` | |cross| | |tick| |
|
||||
+--------------------------+---------------+--------------+
|
||||
| ``colsample_bylevel`` | |cross| | |tick| |
|
||||
+--------------------------+---------------+--------------+
|
||||
| ``max_bin`` | |cross| | |tick| |
|
||||
+--------------------------+---------------+--------------+
|
||||
| ``gpu_id`` | |tick| | |tick| |
|
||||
+--------------------------+---------------+--------------+
|
||||
| ``n_gpus`` | |cross| | |tick| |
|
||||
+--------------------------+---------------+--------------+
|
||||
| ``predictor`` | |tick| | |tick| |
|
||||
+--------------------------+---------------+--------------+
|
||||
| ``grow_policy`` | |cross| | |tick| |
|
||||
+--------------------------+---------------+--------------+
|
||||
| ``monotone_constraints`` | |cross| | |tick| |
|
||||
+--------------------------+---------------+--------------+
|
||||
+--------------------------------+---------------+--------------+
|
||||
| parameter | ``gpu_exact`` | ``gpu_hist`` |
|
||||
+================================+===============+==============+
|
||||
| ``subsample`` | |cross| | |tick| |
|
||||
+--------------------------------+---------------+--------------+
|
||||
| ``colsample_bytree`` | |cross| | |tick| |
|
||||
+--------------------------------+---------------+--------------+
|
||||
| ``colsample_bylevel`` | |cross| | |tick| |
|
||||
+--------------------------------+---------------+--------------+
|
||||
| ``max_bin`` | |cross| | |tick| |
|
||||
+--------------------------------+---------------+--------------+
|
||||
| ``gpu_id`` | |tick| | |tick| |
|
||||
+--------------------------------+---------------+--------------+
|
||||
| ``n_gpus`` | |cross| | |tick| |
|
||||
+--------------------------------+---------------+--------------+
|
||||
| ``predictor`` | |tick| | |tick| |
|
||||
+--------------------------------+---------------+--------------+
|
||||
| ``grow_policy`` | |cross| | |tick| |
|
||||
+--------------------------------+---------------+--------------+
|
||||
| ``monotone_constraints`` | |cross| | |tick| |
|
||||
+--------------------------------+---------------+--------------+
|
||||
| ``single_precision_histogram`` | |cross| | |tick| |
|
||||
+--------------------------------+---------------+--------------+
|
||||
|
||||
GPU accelerated prediction is enabled by default for the above mentioned ``tree_method`` parameters but can be switched to CPU prediction by setting ``predictor`` to ``cpu_predictor``. This could be useful if you want to conserve GPU memory. Likewise when using CPU algorithms, GPU accelerated prediction can be enabled by setting ``predictor`` to ``gpu_predictor``.
|
||||
|
||||
The experimental parameter ``single_precision_histogram`` can be set to True to enable building histograms using single precision. This may improve speed, in particular on older architectures.
|
||||
|
||||
The device ordinal can be selected using the ``gpu_id`` parameter, which defaults to 0.
|
||||
|
||||
Multiple GPUs can be used with the ``gpu_hist`` tree method using the ``n_gpus`` parameter. which defaults to 1. If this is set to -1 all available GPUs will be used. If ``gpu_id`` is specified as non-zero, the gpu device order is ``mod(gpu_id + i) % n_visible_devices`` for ``i=0`` to ``n_gpus-1``. As with GPU vs. CPU, multi-GPU will not always be faster than a single GPU due to PCI bus bandwidth that can limit performance.
|
||||
Multiple GPUs can be used with the ``gpu_hist`` tree method using the ``n_gpus`` parameter. which defaults to 1. If this is set to -1 all available GPUs will be used. If ``gpu_id`` is specified as non-zero, the selected gpu devices will be from ``gpu_id`` to ``gpu_id+n_gpus``, please note that ``gpu_id+n_gpus`` must be less than or equal to the number of available GPUs on your system. As with GPU vs. CPU, multi-GPU will not always be faster than a single GPU due to PCI bus bandwidth that can limit performance.
|
||||
|
||||
.. note:: Enabling multi-GPU training
|
||||
|
||||
@@ -78,6 +82,95 @@ The GPU algorithms currently work with CLI, Python and R packages. See :doc:`/bu
|
||||
param['max_bin'] = 16
|
||||
param['tree_method'] = 'gpu_hist'
|
||||
|
||||
Objective functions
|
||||
===================
|
||||
Most of the objective functions implemented in XGBoost can be run on GPU. The following table shows the current support status.
|
||||
|
||||
.. |tick| unicode:: U+2714
|
||||
.. |cross| unicode:: U+2718
|
||||
|
||||
+-----------------+-------------+
|
||||
| Objectives | GPU support |
|
||||
+-----------------+-------------+
|
||||
| reg:linear | |tick| |
|
||||
+-----------------+-------------+
|
||||
| reg:logistic | |tick| |
|
||||
+-----------------+-------------+
|
||||
| binary:logistic | |tick| |
|
||||
+-----------------+-------------+
|
||||
| binary:logitraw | |tick| |
|
||||
+-----------------+-------------+
|
||||
| binary:hinge | |tick| |
|
||||
+-----------------+-------------+
|
||||
| count:poisson | |tick| |
|
||||
+-----------------+-------------+
|
||||
| reg:gamma | |tick| |
|
||||
+-----------------+-------------+
|
||||
| reg:tweedie | |tick| |
|
||||
+-----------------+-------------+
|
||||
| multi:softmax | |tick| |
|
||||
+-----------------+-------------+
|
||||
| multi:softprob | |tick| |
|
||||
+-----------------+-------------+
|
||||
| survival:cox | |cross| |
|
||||
+-----------------+-------------+
|
||||
| rank:pairwise | |cross| |
|
||||
+-----------------+-------------+
|
||||
| rank:ndcg | |cross| |
|
||||
+-----------------+-------------+
|
||||
| rank:map | |cross| |
|
||||
+-----------------+-------------+
|
||||
|
||||
For multi-gpu support, objective functions also honor the ``n_gpus`` parameter,
|
||||
which, by default is set to 1. To disable running objectives on GPU, just set
|
||||
``n_gpus`` to 0.
|
||||
|
||||
Metric functions
|
||||
===================
|
||||
The following table shows the current support status for evaluation metrics on the GPU.
|
||||
|
||||
.. |tick| unicode:: U+2714
|
||||
.. |cross| unicode:: U+2718
|
||||
|
||||
+-----------------+-------------+
|
||||
| Metric | GPU Support |
|
||||
+=================+=============+
|
||||
| rmse | |tick| |
|
||||
+-----------------+-------------+
|
||||
| mae | |tick| |
|
||||
+-----------------+-------------+
|
||||
| logloss | |tick| |
|
||||
+-----------------+-------------+
|
||||
| error | |tick| |
|
||||
+-----------------+-------------+
|
||||
| merror | |cross| |
|
||||
+-----------------+-------------+
|
||||
| mlogloss | |cross| |
|
||||
+-----------------+-------------+
|
||||
| auc | |cross| |
|
||||
+-----------------+-------------+
|
||||
| aucpr | |cross| |
|
||||
+-----------------+-------------+
|
||||
| ndcg | |cross| |
|
||||
+-----------------+-------------+
|
||||
| map | |cross| |
|
||||
+-----------------+-------------+
|
||||
| poisson-nloglik | |tick| |
|
||||
+-----------------+-------------+
|
||||
| gamma-nloglik | |tick| |
|
||||
+-----------------+-------------+
|
||||
| cox-nloglik | |cross| |
|
||||
+-----------------+-------------+
|
||||
| gamma-deviance | |tick| |
|
||||
+-----------------+-------------+
|
||||
| tweedie-nloglik | |tick| |
|
||||
+-----------------+-------------+
|
||||
|
||||
As for objective functions, metrics honor the ``n_gpus`` parameter,
|
||||
which, by default is set to 1. To disable running metrics on GPU, just set
|
||||
``n_gpus`` to 0.
|
||||
|
||||
|
||||
Benchmarks
|
||||
==========
|
||||
You can run benchmarks on synthetic data for binary classification:
|
||||
@@ -109,13 +202,16 @@ References
|
||||
|
||||
`Nvidia Parallel Forall: Gradient Boosting, Decision Trees and XGBoost with CUDA <https://devblogs.nvidia.com/parallelforall/gradient-boosting-decision-trees-xgboost-cuda/>`_
|
||||
|
||||
Authors
|
||||
Contributors
|
||||
============
|
||||
* Rory Mitchell
|
||||
Many thanks to the following contributors (alphabetical order):
|
||||
* Andrey Adinets
|
||||
* Jiaming Yuan
|
||||
* Jonathan C. McKinney
|
||||
* Matthew Jones
|
||||
* Philip Cho
|
||||
* Rory Mitchell
|
||||
* Shankara Rao Thejaswi Nanditale
|
||||
* Vinay Deshpande
|
||||
* ... and the rest of the H2O.ai and NVIDIA team.
|
||||
|
||||
Please report bugs to the user forum https://discuss.xgboost.ai/.
|
||||
|
||||
|
||||
@@ -57,7 +57,7 @@ and then refer to the snapshot dependency by adding:
|
||||
|
||||
<dependency>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost4j</artifactId>
|
||||
<artifactId>xgboost4j-spark</artifactId>
|
||||
<version>next_version_num-SNAPSHOT</version>
|
||||
</dependency>
|
||||
|
||||
@@ -194,11 +194,16 @@ After we set XGBoostClassifier parameters and feature/label column, we can build
|
||||
Early Stopping
|
||||
----------------
|
||||
|
||||
Early stopping is a feature to prevent the unnecessary training iterations. By specifying ``num_early_stopping_rounds`` or directly call ``setNumEarlyStoppingRounds`` over a XGBoostClassifier or XGBoostRegressor, we can define number of rounds for the evaluation metric going to the unexpected direction to tolerate before stopping the training.
|
||||
Early stopping is a feature to prevent unnecessary training iterations. By specifying ``num_early_stopping_rounds`` or directly calling ``setNumEarlyStoppingRounds`` on an XGBoostClassifier or XGBoostRegressor, we can define the number of rounds the evaluation metric may move away from the best iteration before training is stopped early.
|
||||
|
||||
In additional to ``num_early_stopping_rounds``, you also need to define ``maximize_evaluation_metrics`` or call ``setMaximizeEvaluationMetrics`` to specify whether you want to maximize or minimize the metrics in training.
|
||||
|
||||
After specifying these two parameters, the training would stop when the metrics goes to the other direction against the one specified by ``maximize_evaluation_metrics`` for ``num_early_stopping_rounds`` iterations.
|
||||
For example, suppose we need to maximize the evaluation metric (set ``maximize_evaluation_metrics`` to true) and set ``num_early_stopping_rounds`` to 5, and the evaluation metric of the 10th iteration is the maximum one so far. In the following iterations, if there is no evaluation metric greater than the 10th iteration's (the best one), training would be stopped early at the 15th iteration.
|
||||
|
||||
Training with Evaluation Sets
|
||||
----------------
|
||||
|
||||
You can also monitor the performance of the model during training with multiple evaluation datasets. By specifying ``eval_sets`` or call ``setEvalSets`` over a XGBoostClassifier or XGBoostRegressor, you can pass in multiple evaluation datasets typed as a Map from String to DataFrame.
|
||||
|
||||
Prediction
|
||||
==========
|
||||
|
||||
@@ -23,9 +23,16 @@ General Parameters
|
||||
|
||||
- Which booster to use. Can be ``gbtree``, ``gblinear`` or ``dart``; ``gbtree`` and ``dart`` use tree based models while ``gblinear`` uses linear functions.
|
||||
|
||||
* ``silent`` [default=0]
|
||||
* ``silent`` [default=0] [Deprecated]
|
||||
|
||||
- 0 means printing running messages, 1 means silent mode
|
||||
- Deprecated. Please use ``verbosity`` instead.
|
||||
|
||||
* ``verbosity`` [default=1]
|
||||
|
||||
- Verbosity of printing messages. Valid values are 0 (silent),
|
||||
1 (warning), 2 (info), 3 (debug). Sometimes XGBoost tries to change
|
||||
configurations based on heuristics, which is displayed as warning message.
|
||||
If there's unexpected behaviour, please try to increase value of verbosity.
|
||||
|
||||
* ``nthread`` [default to maximum number of threads available if not set]
|
||||
|
||||
@@ -57,8 +64,8 @@ Parameters for Tree Booster
|
||||
|
||||
* ``max_depth`` [default=6]
|
||||
|
||||
- Maximum depth of a tree. Increasing this value will make the model more complex and more likely to overfit. 0 indicates no limit. Note that a limit is required when ``grow_policy`` is set to ``depthwise``.
|
||||
- range: [0,∞]
|
||||
- Maximum depth of a tree. Increasing this value will make the model more complex and more likely to overfit. 0 is only accepted in ``lossguided`` growing policy when tree_method is set as ``hist`` and it indicates no limit on depth. Beware that XGBoost aggressively consumes memory when training a deep tree.
|
||||
- range: [0,∞] (0 is only accepted in ``lossguided`` growing policy when tree_method is set as ``hist``)
|
||||
|
||||
* ``min_child_weight`` [default=1]
|
||||
|
||||
@@ -75,15 +82,22 @@ Parameters for Tree Booster
|
||||
- Subsample ratio of the training instances. Setting it to 0.5 means that XGBoost would randomly sample half of the training data prior to growing trees. and this will prevent overfitting. Subsampling will occur once in every boosting iteration.
|
||||
- range: (0,1]
|
||||
|
||||
* ``colsample_bytree`` [default=1]
|
||||
|
||||
- Subsample ratio of columns when constructing each tree. Subsampling will occur once in every boosting iteration.
|
||||
- range: (0,1]
|
||||
|
||||
* ``colsample_bylevel`` [default=1]
|
||||
|
||||
- Subsample ratio of columns for each split, in each level. Subsampling will occur each time a new split is made.
|
||||
- range: (0,1]
|
||||
* ``colsample_bytree``, ``colsample_bylevel``, ``colsample_bynode`` [default=1]
|
||||
- This is a family of parameters for subsampling of columns.
|
||||
- All ``colsample_by*`` parameters have a range of (0, 1], the default value of 1, and
|
||||
specify the fraction of columns to be subsampled.
|
||||
- ``colsample_bytree`` is the subsample ratio of columns when constructing each
|
||||
tree. Subsampling occurs once for every tree constructed.
|
||||
- ``colsample_bylevel`` is the subsample ratio of columns for each level. Subsampling
|
||||
occurs once for every new depth level reached in a tree. Columns are subsampled from
|
||||
the set of columns chosen for the current tree.
|
||||
- ``colsample_bynode`` is the subsample ratio of columns for each node
|
||||
(split). Subsampling occurs once every time a new split is evaluated. Columns are
|
||||
subsampled from the set of columns chosen for the current level.
|
||||
- ``colsample_by*`` parameters work cumulatively. For instance,
|
||||
the combination ``{'colsample_bytree':0.5, 'colsample_bylevel':0.5,
|
||||
'colsample_bynode':0.5}`` with 64 features will leave 4 features to choose from at
|
||||
each split.
|
||||
|
||||
* ``lambda`` [default=1, alias: ``reg_lambda``]
|
||||
|
||||
@@ -96,7 +110,7 @@ Parameters for Tree Booster
|
||||
* ``tree_method`` string [default= ``auto``]
|
||||
|
||||
- The tree construction algorithm used in XGBoost. See description in the `reference paper <http://arxiv.org/abs/1603.02754>`_.
|
||||
- Distributed and external memory version only support ``tree_method=approx``.
|
||||
- XGBoost supports ``hist`` and ``approx`` for distributed training and only support ``approx`` for external memory version.
|
||||
- Choices: ``auto``, ``exact``, ``approx``, ``hist``, ``gpu_exact``, ``gpu_hist``
|
||||
|
||||
- ``auto``: Use heuristic to choose the fastest method.
|
||||
@@ -138,7 +152,7 @@ Parameters for Tree Booster
|
||||
- ``refresh``: refreshes tree's statistics and/or leaf values based on the current data. Note that no random subsampling of data rows is performed.
|
||||
- ``prune``: prunes the splits where loss < min_split_loss (or gamma).
|
||||
|
||||
- In a distributed setting, the implicit updater sequence value would be adjusted to ``grow_histmaker,prune``.
|
||||
- In a distributed setting, the implicit updater sequence value would be adjusted to ``grow_histmaker,prune`` by default, and you can set ``tree_method`` as ``hist`` to use ``grow_histmaker``.
|
||||
|
||||
* ``refresh_leaf`` [default=1]
|
||||
|
||||
@@ -178,6 +192,10 @@ Parameters for Tree Booster
|
||||
- ``cpu_predictor``: Multicore CPU prediction algorithm.
|
||||
- ``gpu_predictor``: Prediction using GPU. Default when ``tree_method`` is ``gpu_exact`` or ``gpu_hist``.
|
||||
|
||||
* ``num_parallel_tree``, [default=1]
|
||||
- Number of parallel trees constructed during each iteration. This
|
||||
option is used to support boosted random forest
|
||||
|
||||
Additional parameters for Dart Booster (``booster=dart``)
|
||||
=========================================================
|
||||
|
||||
@@ -245,8 +263,8 @@ Parameters for Linear Booster (``booster=gblinear``)
|
||||
|
||||
- Choice of algorithm to fit linear model
|
||||
|
||||
- ``shotgun``: Parallel coordinate descent algorithm based on shotgun algorithm. Uses 'hogwild' parallelism and therefore produces a nondeterministic solution on each run.
|
||||
- ``coord_descent``: Ordinary coordinate descent algorithm. Also multithreaded but still produces a deterministic solution.
|
||||
- ``shotgun``: Parallel coordinate descent algorithm based on shotgun algorithm. Uses 'hogwild' parallelism and therefore produces a nondeterministic solution on each run.
|
||||
- ``coord_descent``: Ordinary coordinate descent algorithm. Also multithreaded but still produces a deterministic solution.
|
||||
|
||||
* ``feature_selector`` [default= ``cyclic``]
|
||||
|
||||
@@ -283,9 +301,6 @@ Specify the learning task and the corresponding learning objective. The objectiv
|
||||
- ``binary:logistic``: logistic regression for binary classification, output probability
|
||||
- ``binary:logitraw``: logistic regression for binary classification, output score before logistic transformation
|
||||
- ``binary:hinge``: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
|
||||
- ``gpu:reg:linear``, ``gpu:reg:logistic``, ``gpu:binary:logistic``, ``gpu:binary:logitraw``: versions
|
||||
of the corresponding objective functions evaluated on the GPU; note that like the GPU histogram algorithm,
|
||||
they can only be used when the entire training session uses the same dataset
|
||||
- ``count:poisson`` --poisson regression for count data, output mean of poisson distribution
|
||||
|
||||
- ``max_delta_step`` is set to 0.7 by default in poisson regression (used to safeguard optimization)
|
||||
|
||||
@@ -48,9 +48,15 @@ The data is stored in a :py:class:`DMatrix <xgboost.DMatrix>` object.
|
||||
dtrain = xgb.DMatrix('train.csv?format=csv&label_column=0')
|
||||
dtest = xgb.DMatrix('test.csv?format=csv&label_column=0')
|
||||
|
||||
(Note that XGBoost does not support categorical features; if your data contains
|
||||
categorical features, load it as a NumPy array first and then perform
|
||||
`one-hot encoding <http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html>`_.)
|
||||
.. note:: Categorical features not supported
|
||||
|
||||
Note that XGBoost does not support categorical features; if your data contains
|
||||
categorical features, load it as a NumPy array first and then perform
|
||||
`one-hot encoding <http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html>`_.
|
||||
|
||||
.. note:: Use Pandas to load CSV files with headers
|
||||
|
||||
Currently, the DMLC data parser cannot parse CSV files with headers. Use Pandas (see below) to read CSV files with headers.
|
||||
|
||||
* To load a NumPy array into :py:class:`DMatrix <xgboost.DMatrix>`:
|
||||
|
||||
@@ -95,6 +101,10 @@ The data is stored in a :py:class:`DMatrix <xgboost.DMatrix>` object.
|
||||
w = np.random.rand(5, 1)
|
||||
dtrain = xgb.DMatrix(data, label=label, missing=-999.0, weight=w)
|
||||
|
||||
When performing ranking tasks, the number of weights should be equal
|
||||
to number of groups.
|
||||
|
||||
|
||||
Setting Parameters
|
||||
------------------
|
||||
XGBoost can use either a list of pairs or a dictionary to set :doc:`parameters </parameter>`. For instance:
|
||||
@@ -155,6 +165,10 @@ A saved model can be loaded as follows:
|
||||
bst = xgb.Booster({'nthread': 4}) # init model
|
||||
bst.load_model('model.bin') # load data
|
||||
|
||||
Methods including `update` and `boost` from `xgboost.Booster` are designed for
|
||||
internal usage only. The wrapper function `xgboost.train` does some
|
||||
pre-configuration including setting up caches and some other parameters.
|
||||
|
||||
Early Stopping
|
||||
--------------
|
||||
If you have a validation set, you can use early stopping to find the optimal number of boosting rounds.
|
||||
@@ -209,4 +223,3 @@ When you use ``IPython``, you can use the :py:meth:`xgboost.to_graphviz` functio
|
||||
.. code-block:: python
|
||||
|
||||
xgb.to_graphviz(bst, num_trees=2)
|
||||
|
||||
|
||||
@@ -1,216 +1,8 @@
|
||||
###############################
|
||||
Distributed XGBoost YARN on AWS
|
||||
###############################
|
||||
This is a step-by-step tutorial on how to setup and run distributed `XGBoost <https://github.com/dmlc/xgboost>`_
|
||||
on an AWS EC2 cluster. Distributed XGBoost runs on various platforms such as MPI, SGE and Hadoop YARN.
|
||||
In this tutorial, we use YARN as an example since this is a widely used solution for distributed computing.
|
||||
[This page is under construction.]
|
||||
|
||||
.. note:: XGBoost with Spark
|
||||
|
||||
If you are preprocessing training data with Spark, consider using :doc:`XGBoost4J-Spark </jvm/xgboost4j_spark_tutorial>`.
|
||||
|
||||
************
|
||||
Prerequisite
|
||||
************
|
||||
We need to get a `AWS key-pair <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html>`_
|
||||
to access the AWS services. Let us assume that we are using a key ``mykey`` and the corresponding permission file ``mypem.pem``.
|
||||
|
||||
We also need `AWS credentials <https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html>`_,
|
||||
which includes an ``ACCESS_KEY_ID`` and a ``SECRET_ACCESS_KEY``.
|
||||
|
||||
Finally, we will need a S3 bucket to host the data and the model, ``s3://mybucket/``
|
||||
|
||||
***************************
|
||||
Setup a Hadoop YARN Cluster
|
||||
***************************
|
||||
This sections shows how to start a Hadoop YARN cluster from scratch.
|
||||
You can skip this step if you already have one.
|
||||
We will be using `yarn-ec2 <https://github.com/tqchen/yarn-ec2>`_ to start the cluster.
|
||||
|
||||
We can first clone the yarn-ec2 script by the following command.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git clone https://github.com/tqchen/yarn-ec2
|
||||
|
||||
To use the script, we must set the environment variables ``AWS_ACCESS_KEY_ID`` and
|
||||
``AWS_SECRET_ACCESS_KEY`` properly. This can be done by adding the following two lines in
|
||||
``~/.bashrc`` (replacing the strings with the correct ones)
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
export AWS_ACCESS_KEY_ID=[your access ID]
|
||||
export AWS_SECRET_ACCESS_KEY=[your secret access key]
|
||||
|
||||
Now we can launch a master machine of the cluster from EC2:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
./yarn-ec2 -k mykey -i mypem.pem launch xgboost
|
||||
|
||||
Wait a few minutes till the master machine gets up.
|
||||
|
||||
After the master machine gets up, we can query the public DNS of the master machine using the following command.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
./yarn-ec2 -k mykey -i mypem.pem get-master xgboost
|
||||
|
||||
It will show the public DNS of the master machine like ``ec2-xx-xx-xx.us-west-2.compute.amazonaws.com``
|
||||
Now we can open the browser, and type (replace the DNS with the master DNS)
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
ec2-xx-xx-xx.us-west-2.compute.amazonaws.com:8088
|
||||
|
||||
This will show the job tracker of the YARN cluster. Note that we may have to wait a few minutes before the master finishes bootstrapping and starts the
|
||||
job tracker.
|
||||
|
||||
After the master machine gets up, we can freely add more slave machines to the cluster.
|
||||
The following command add m3.xlarge instances to the cluster.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
./yarn-ec2 -k mykey -i mypem.pem -t m3.xlarge -s 2 addslave xgboost
|
||||
|
||||
We can also choose to add two spot instances
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
./yarn-ec2 -k mykey -i mypem.pem -t m3.xlarge -s 2 addspot xgboost
|
||||
|
||||
The slave machines will start up, bootstrap and report to the master.
|
||||
You can check if the slave machines are connected by clicking on the Nodes link on the job tracker.
|
||||
Or simply type the following URL (replace DNS with the master DNS)
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
ec2-xx-xx-xx.us-west-2.compute.amazonaws.com:8088/cluster/nodes
|
||||
|
||||
One thing we should note is that not all the links in the job tracker work.
|
||||
This is because many of them use the private IP addresses of AWS, which can only be accessed from within EC2.
|
||||
We can use an SSH proxy to access these pages.
|
||||
Now that we have set up a cluster with one master and two slaves, we are ready to run the experiment.
|
||||
|
||||
*********************
|
||||
Build XGBoost with S3
|
||||
*********************
|
||||
We can log into the master machine by the following command.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
./yarn-ec2 -k mykey -i mypem.pem login xgboost
|
||||
|
||||
We will be using S3 to host the data and the result model, so the data won't get lost after the cluster shutdown.
|
||||
To do so, we will need to build XGBoost with S3 support. The only thing we need to do is to set ``USE_S3``
|
||||
variable to be true. This can be achieved by the following command.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git clone --recursive https://github.com/dmlc/xgboost
|
||||
cd xgboost
|
||||
cp make/config.mk config.mk
|
||||
echo "USE_S3=1" >> config.mk
|
||||
make -j4
|
||||
|
||||
Now we have built the XGBoost with S3 support. You can also enable HDFS support if you plan to store data on HDFS by turning on ``USE_HDFS`` option.
|
||||
XGBoost also relies on the environment variable to access S3, so you will need to add the following two lines to ``~/.bashrc`` (replacing the strings with the correct ones)
|
||||
on the master machine as well.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
export AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE
|
||||
export AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
|
||||
export BUCKET=mybucket
|
||||
|
||||
*******************
|
||||
Host the Data on S3
|
||||
*******************
|
||||
In this example, we will copy the example dataset in XGBoost to the S3 bucket as input.
|
||||
In normal use cases, the dataset is usually created from an existing distributed processing pipeline.
|
||||
We can use `s3cmd <http://s3tools.org/s3cmd>`_ to copy the data into mybucket (replace ``${BUCKET}`` with the real bucket name).
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
cd xgboost
|
||||
s3cmd put demo/data/agaricus.txt.train s3://${BUCKET}/xgb-demo/train/
|
||||
s3cmd put demo/data/agaricus.txt.test s3://${BUCKET}/xgb-demo/test/
|
||||
|
||||
***************
|
||||
Submit the Jobs
|
||||
***************
|
||||
Now everything is ready, we can submit the XGBoost distributed job to the YARN cluster.
|
||||
We will use the `dmlc-submit <https://github.com/dmlc/dmlc-core/tree/master/tracker>`_ script to submit the job.
|
||||
|
||||
Now we can run the following script in the distributed training folder (replace ``${BUCKET}`` with the real bucket name)
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
cd xgboost/demo/distributed-training
|
||||
# Use dmlc-submit to submit the job.
|
||||
../../dmlc-core/tracker/dmlc-submit --cluster=yarn --num-workers=2 --worker-cores=2\
|
||||
../../xgboost mushroom.aws.conf nthread=2\
|
||||
data=s3://${BUCKET}/xgb-demo/train\
|
||||
eval[test]=s3://${BUCKET}/xgb-demo/test\
|
||||
model_dir=s3://${BUCKET}/xgb-demo/model
|
||||
|
||||
All the configurations such as ``data`` and ``model_dir`` can also be directly written into the configuration file.
|
||||
Note that we only specified the folder path to the file, instead of the file name.
|
||||
XGBoost will read in all the files in that folder as the training and evaluation data.
|
||||
|
||||
In this command, we are using two workers, and each worker uses two running threads.
|
||||
XGBoost can benefit from using multiple cores in each worker.
|
||||
A common choice of working cores can range from 4 to 8.
|
||||
The trained model will be saved into the specified model folder. You can browse the model folder.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
s3cmd ls s3://${BUCKET}/xgb-demo/model/
|
||||
|
||||
The following is an example output from distributed training.
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
16/02/26 05:41:59 INFO dmlc.Client: jobname=DMLC[nworker=2]:xgboost,username=ubuntu
|
||||
16/02/26 05:41:59 INFO dmlc.Client: Submitting application application_1456461717456_0015
|
||||
16/02/26 05:41:59 INFO impl.YarnClientImpl: Submitted application application_1456461717456_0015
|
||||
2016-02-26 05:42:05,230 INFO @tracker All of 2 nodes getting started
|
||||
2016-02-26 05:42:14,027 INFO [05:42:14] [0] test-error:0.016139 train-error:0.014433
|
||||
2016-02-26 05:42:14,186 INFO [05:42:14] [1] test-error:0.000000 train-error:0.001228
|
||||
2016-02-26 05:42:14,947 INFO @tracker All nodes finishes job
|
||||
2016-02-26 05:42:14,948 INFO @tracker 9.71754479408 secs between node start and job finish
|
||||
Application application_1456461717456_0015 finished with state FINISHED at 1456465335961
|
||||
|
||||
*****************
|
||||
Analyze the Model
|
||||
*****************
|
||||
After the model is trained, we can analyse the learnt model and use it for future prediction tasks.
|
||||
XGBoost is a portable framework, meaning the models in all platforms are *exchangeable*.
|
||||
This means we can load the trained model in Python/R/Julia and take advantage of the data science pipelines
|
||||
in these languages to do model analysis and prediction.
|
||||
|
||||
For example, you can use `this IPython notebook <https://github.com/dmlc/xgboost/tree/master/demo/distributed-training/plot_model.ipynb>`_
|
||||
to plot feature importance and visualize the learnt model.
|
||||
|
||||
***************
|
||||
Troubleshooting
|
||||
***************
|
||||
|
||||
If you encounter a problem, the best way might be to use the following command
|
||||
to get logs of stdout and stderr of the containers and check what causes the problem.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
yarn logs -applicationId yourAppId
|
||||
|
||||
*****************
|
||||
Future Directions
|
||||
*****************
|
||||
You have learned to use distributed XGBoost on YARN in this tutorial.
|
||||
XGBoost is a portable and scalable framework for gradient boosting.
|
||||
You can check out more examples and resources in the `resources page <https://github.com/dmlc/xgboost/blob/master/demo/README.md>`_.
|
||||
|
||||
The project goal is to make the best scalable machine learning solution available to all platforms.
|
||||
The API is designed to be portable, and the same code can also run on other platforms such as MPI and SGE.
|
||||
XGBoost is actively evolving and we are working on even more exciting features
|
||||
such as distributed XGBoost python/R package.
|
||||
|
||||
@@ -143,11 +143,11 @@ first and second constraints (``[0, 1]``, ``[2, 3, 4]``).
|
||||
Enforcing Feature Interaction Constraints in XGBoost
|
||||
****************************************************
|
||||
|
||||
It is very simple to enforce monotonicity constraints in XGBoost. Here we will
|
||||
It is very simple to enforce feature interaction constraints in XGBoost. Here we will
|
||||
give an example using Python, but the same general idea generalizes to other
|
||||
platforms.
|
||||
|
||||
Suppose the following code fits your model without monotonicity constraints:
|
||||
Suppose the following code fits your model without feature interaction constraints:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@@ -155,7 +155,7 @@ Suppose the following code fits your model without monotonicity constraints:
|
||||
num_boost_round = 1000, evals = evallist,
|
||||
early_stopping_rounds = 10)
|
||||
|
||||
Then fitting with monotonicity constraints only requires adding a single
|
||||
Then fitting with feature interaction constraints only requires adding a single
|
||||
parameter:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
*/
|
||||
#ifndef XGBOOST_STRICT_R_MODE
|
||||
#define XGBOOST_STRICT_R_MODE 0
|
||||
#endif
|
||||
#endif // XGBOOST_STRICT_R_MODE
|
||||
|
||||
/*!
|
||||
* \brief Whether always log console message with time.
|
||||
@@ -26,21 +26,21 @@
|
||||
*/
|
||||
#ifndef XGBOOST_LOG_WITH_TIME
|
||||
#define XGBOOST_LOG_WITH_TIME 1
|
||||
#endif
|
||||
#endif // XGBOOST_LOG_WITH_TIME
|
||||
|
||||
/*!
|
||||
* \brief Whether customize the logger outputs.
|
||||
*/
|
||||
#ifndef XGBOOST_CUSTOMIZE_LOGGER
|
||||
#define XGBOOST_CUSTOMIZE_LOGGER XGBOOST_STRICT_R_MODE
|
||||
#endif
|
||||
#endif // XGBOOST_CUSTOMIZE_LOGGER
|
||||
|
||||
/*!
|
||||
* \brief Whether to customize global PRNG.
|
||||
*/
|
||||
#ifndef XGBOOST_CUSTOMIZE_GLOBAL_PRNG
|
||||
#define XGBOOST_CUSTOMIZE_GLOBAL_PRNG XGBOOST_STRICT_R_MODE
|
||||
#endif
|
||||
#endif // XGBOOST_CUSTOMIZE_GLOBAL_PRNG
|
||||
|
||||
/*!
|
||||
* \brief Check if alignas(*) keyword is supported. (g++ 4.8 or higher)
|
||||
@@ -49,7 +49,7 @@
|
||||
#define XGBOOST_ALIGNAS(X) alignas(X)
|
||||
#else
|
||||
#define XGBOOST_ALIGNAS(X)
|
||||
#endif
|
||||
#endif // defined(__GNUC__) && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ > 4)
|
||||
|
||||
#if defined(__GNUC__) && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ > 4) && \
|
||||
!defined(__CUDACC__)
|
||||
@@ -64,7 +64,7 @@
|
||||
#else
|
||||
#define XGBOOST_PARALLEL_SORT(X, Y, Z) std::sort((X), (Y), (Z))
|
||||
#define XGBOOST_PARALLEL_STABLE_SORT(X, Y, Z) std::stable_sort((X), (Y), (Z))
|
||||
#endif
|
||||
#endif // GLIBC VERSION
|
||||
|
||||
/*!
|
||||
* \brief Tag function as usable by device
|
||||
@@ -73,7 +73,7 @@
|
||||
#define XGBOOST_DEVICE __host__ __device__
|
||||
#else
|
||||
#define XGBOOST_DEVICE
|
||||
#endif
|
||||
#endif // defined (__CUDA__) || defined(__NVCC__)
|
||||
|
||||
/*! \brief namespace of xgboost*/
|
||||
namespace xgboost {
|
||||
@@ -215,7 +215,11 @@ using bst_omp_uint = dmlc::omp_uint; // NOLINT
|
||||
#if __GNUC__ == 4 && __GNUC_MINOR__ < 8
|
||||
#define override
|
||||
#define final
|
||||
#endif
|
||||
#endif
|
||||
#endif // __GNUC__ == 4 && __GNUC_MINOR__ < 8
|
||||
#endif // DMLC_USE_CXX11 && defined(__GNUC__) && !defined(__clang_version__)
|
||||
} // namespace xgboost
|
||||
|
||||
/* Always keep this #include at the bottom of xgboost/base.h */
|
||||
#include <xgboost/build_config.h>
|
||||
|
||||
#endif // XGBOOST_BASE_H_
|
||||
|
||||
18
include/xgboost/build_config.h
Normal file
18
include/xgboost/build_config.h
Normal file
@@ -0,0 +1,18 @@
|
||||
/*!
|
||||
* Copyright 2019 by Contributors
|
||||
* \file build_config.h
|
||||
*/
|
||||
#ifndef XGBOOST_BUILD_CONFIG_H_
|
||||
#define XGBOOST_BUILD_CONFIG_H_
|
||||
|
||||
/* default logic for software pre-fetching */
|
||||
#if (defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64))) || defined(__INTEL_COMPILER)
|
||||
// Enable _mm_prefetch for Intel compiler and MSVC+x86
|
||||
#define XGBOOST_MM_PREFETCH_PRESENT
|
||||
#define XGBOOST_BUILTIN_PREFETCH_PRESENT
|
||||
#elif defined(__GNUC__)
|
||||
// Enable __builtin_prefetch for GCC
|
||||
#define XGBOOST_BUILTIN_PREFETCH_PRESENT
|
||||
#endif // GUARDS
|
||||
|
||||
#endif // XGBOOST_BUILD_CONFIG_H_
|
||||
@@ -10,11 +10,12 @@
|
||||
#ifdef __cplusplus
|
||||
#define XGB_EXTERN_C extern "C"
|
||||
#include <cstdio>
|
||||
#include <cstdint>
|
||||
#else
|
||||
#define XGB_EXTERN_C
|
||||
#include <stdio.h>
|
||||
#include <stdint.h>
|
||||
#endif
|
||||
#endif // __cplusplus
|
||||
|
||||
// XGBoost C API will include APIs in Rabit C API
|
||||
#include <rabit/c_api.h>
|
||||
@@ -23,7 +24,7 @@
|
||||
#define XGB_DLL XGB_EXTERN_C __declspec(dllexport)
|
||||
#else
|
||||
#define XGB_DLL XGB_EXTERN_C
|
||||
#endif
|
||||
#endif // defined(_MSC_VER) || defined(_WIN32)
|
||||
|
||||
// manually define unsigned long
|
||||
typedef uint64_t bst_ulong; // NOLINT(*)
|
||||
@@ -49,7 +50,7 @@ typedef struct { // NOLINT(*)
|
||||
long* offset; // NOLINT(*)
|
||||
#else
|
||||
int64_t* offset; // NOLINT(*)
|
||||
#endif
|
||||
#endif // __APPLE__
|
||||
/*! \brief labels of each instance */
|
||||
float* label;
|
||||
/*! \brief weight of each instance, can be NULL */
|
||||
@@ -562,7 +563,7 @@ XGB_DLL int XGBoosterGetAttr(BoosterHandle handle,
|
||||
*
|
||||
* \param handle handle
|
||||
* \param key The key of the attribute.
|
||||
* \param value The value to be saved.
|
||||
* \param value The value to be saved.
|
||||
* If nullptr, the attribute would be deleted.
|
||||
* \return 0 when success, -1 when failure happens
|
||||
*/
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
|
||||
#include <dmlc/base.h>
|
||||
#include <dmlc/data.h>
|
||||
#include <rabit/rabit.h>
|
||||
#include <cstring>
|
||||
#include <memory>
|
||||
#include <numeric>
|
||||
@@ -169,8 +170,16 @@ class SparsePage {
|
||||
inline Inst operator[](size_t i) const {
|
||||
const auto& data_vec = data.HostVector();
|
||||
const auto& offset_vec = offset.HostVector();
|
||||
size_t size;
|
||||
// in distributed mode, some partitions may not get any instance for a feature. Therefore
|
||||
// we should set the size as zero
|
||||
if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
|
||||
size = 0;
|
||||
} else {
|
||||
size = offset_vec[i + 1] - offset_vec[i];
|
||||
}
|
||||
return {data_vec.data() + offset_vec[i],
|
||||
static_cast<Inst::index_type>(offset_vec[i + 1] - offset_vec[i])};
|
||||
static_cast<Inst::index_type>(size)};
|
||||
}
|
||||
|
||||
/*! \brief constructor */
|
||||
@@ -241,42 +250,17 @@ class SparsePage {
|
||||
* \brief Push row block into the page.
|
||||
* \param batch the row batch.
|
||||
*/
|
||||
inline void Push(const dmlc::RowBlock<uint32_t>& batch) {
|
||||
auto& data_vec = data.HostVector();
|
||||
auto& offset_vec = offset.HostVector();
|
||||
data_vec.reserve(data.Size() + batch.offset[batch.size] - batch.offset[0]);
|
||||
offset_vec.reserve(offset.Size() + batch.size);
|
||||
CHECK(batch.index != nullptr);
|
||||
for (size_t i = 0; i < batch.size; ++i) {
|
||||
offset_vec.push_back(offset_vec.back() + batch.offset[i + 1] - batch.offset[i]);
|
||||
}
|
||||
for (size_t i = batch.offset[0]; i < batch.offset[batch.size]; ++i) {
|
||||
uint32_t index = batch.index[i];
|
||||
bst_float fvalue = batch.value == nullptr ? 1.0f : batch.value[i];
|
||||
data_vec.emplace_back(index, fvalue);
|
||||
}
|
||||
CHECK_EQ(offset_vec.back(), data.Size());
|
||||
}
|
||||
void Push(const dmlc::RowBlock<uint32_t>& batch);
|
||||
/*!
|
||||
* \brief Push a sparse page
|
||||
* \param batch the row page
|
||||
*/
|
||||
inline void Push(const SparsePage &batch) {
|
||||
auto& data_vec = data.HostVector();
|
||||
auto& offset_vec = offset.HostVector();
|
||||
const auto& batch_offset_vec = batch.offset.HostVector();
|
||||
const auto& batch_data_vec = batch.data.HostVector();
|
||||
size_t top = offset_vec.back();
|
||||
data_vec.resize(top + batch.data.Size());
|
||||
std::memcpy(dmlc::BeginPtr(data_vec) + top,
|
||||
dmlc::BeginPtr(batch_data_vec),
|
||||
sizeof(Entry) * batch.data.Size());
|
||||
size_t begin = offset.Size();
|
||||
offset_vec.resize(begin + batch.Size());
|
||||
for (size_t i = 0; i < batch.Size(); ++i) {
|
||||
offset_vec[i + begin] = top + batch_offset_vec[i + 1];
|
||||
}
|
||||
}
|
||||
void Push(const SparsePage &batch);
|
||||
/*!
|
||||
* \brief Push a SparsePage stored in CSC format
|
||||
* \param batch The row batch to be pushed
|
||||
*/
|
||||
void PushCSC(const SparsePage& batch);
|
||||
/*!
|
||||
* \brief Push one instance into page
|
||||
* \param inst an instance row
|
||||
@@ -285,7 +269,6 @@ class SparsePage {
|
||||
auto& data_vec = data.HostVector();
|
||||
auto& offset_vec = offset.HostVector();
|
||||
offset_vec.push_back(offset_vec.back() + inst.size());
|
||||
|
||||
size_t begin = data_vec.size();
|
||||
data_vec.resize(begin + inst.size());
|
||||
if (inst.size() != 0) {
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
#include <rabit/rabit.h>
|
||||
#include <utility>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "./base.h"
|
||||
|
||||
@@ -9,8 +9,13 @@
|
||||
#define XGBOOST_LOGGING_H_
|
||||
|
||||
#include <dmlc/logging.h>
|
||||
#include <dmlc/parameter.h>
|
||||
#include <dmlc/thread_local.h>
|
||||
#include <sstream>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
#include "./base.h"
|
||||
|
||||
namespace xgboost {
|
||||
@@ -20,7 +25,7 @@ class BaseLogger {
|
||||
BaseLogger() {
|
||||
#if XGBOOST_LOG_WITH_TIME
|
||||
log_stream_ << "[" << dmlc::DateLogger().HumanDate() << "] ";
|
||||
#endif
|
||||
#endif // XGBOOST_LOG_WITH_TIME
|
||||
}
|
||||
std::ostream& stream() { return log_stream_; } // NOLINT
|
||||
|
||||
@@ -28,8 +33,55 @@ class BaseLogger {
|
||||
std::ostringstream log_stream_;
|
||||
};
|
||||
|
||||
// Parsing both silent and debug_verbose is to provide backward compatibility.
|
||||
struct ConsoleLoggerParam : public dmlc::Parameter<ConsoleLoggerParam> {
|
||||
bool silent; // deprecated.
|
||||
int verbosity;
|
||||
|
||||
DMLC_DECLARE_PARAMETER(ConsoleLoggerParam) {
|
||||
DMLC_DECLARE_FIELD(silent)
|
||||
.set_default(false)
|
||||
.describe("Do not print information during training.");
|
||||
DMLC_DECLARE_FIELD(verbosity)
|
||||
.set_range(0, 3)
|
||||
.set_default(1) // shows only warning
|
||||
.describe("Flag to print out detailed breakdown of runtime.");
|
||||
DMLC_DECLARE_ALIAS(verbosity, debug_verbose);
|
||||
}
|
||||
};
|
||||
|
||||
class ConsoleLogger : public BaseLogger {
|
||||
public:
|
||||
enum class LogVerbosity {
|
||||
kSilent = 0,
|
||||
kWarning = 1,
|
||||
kInfo = 2, // information may interests users.
|
||||
kDebug = 3, // information only interesting to developers.
|
||||
kIgnore = 4 // ignore global setting
|
||||
};
|
||||
using LV = LogVerbosity;
|
||||
|
||||
private:
|
||||
static LogVerbosity global_verbosity_;
|
||||
static ConsoleLoggerParam param_;
|
||||
|
||||
LogVerbosity cur_verbosity_;
|
||||
static void Configure(const std::map<std::string, std::string>& args);
|
||||
|
||||
public:
|
||||
template <typename ArgIter>
|
||||
static void Configure(ArgIter begin, ArgIter end) {
|
||||
std::map<std::string, std::string> args(begin, end);
|
||||
Configure(args);
|
||||
}
|
||||
|
||||
static LogVerbosity GlobalVerbosity();
|
||||
static LogVerbosity DefaultVerbosity();
|
||||
static bool ShouldLog(LogVerbosity verbosity);
|
||||
|
||||
ConsoleLogger() = delete;
|
||||
explicit ConsoleLogger(LogVerbosity cur_verb);
|
||||
ConsoleLogger(const std::string& file, int line, LogVerbosity cur_verb);
|
||||
~ConsoleLogger();
|
||||
};
|
||||
|
||||
@@ -64,17 +116,47 @@ class LogCallbackRegistry {
|
||||
return nullptr;
|
||||
}
|
||||
};
|
||||
#endif
|
||||
#endif // !defined(XGBOOST_STRICT_R_MODE) || XGBOOST_STRICT_R_MODE == 0
|
||||
|
||||
using LogCallbackRegistryStore = dmlc::ThreadLocalStore<LogCallbackRegistry>;
|
||||
|
||||
// Redefines LOG_WARNING for controling verbosity
|
||||
#if defined(LOG_WARNING)
|
||||
#undef LOG_WARNING
|
||||
#endif // defined(LOG_WARNING)
|
||||
#define LOG_WARNING \
|
||||
if (::xgboost::ConsoleLogger::ShouldLog( \
|
||||
::xgboost::ConsoleLogger::LV::kWarning)) \
|
||||
::xgboost::ConsoleLogger(__FILE__, __LINE__, \
|
||||
::xgboost::ConsoleLogger::LogVerbosity::kWarning)
|
||||
|
||||
// Redefines LOG_INFO for controling verbosity
|
||||
#if defined(LOG_INFO)
|
||||
#undef LOG_INFO
|
||||
#endif // defined(LOG_INFO)
|
||||
#define LOG_INFO \
|
||||
if (::xgboost::ConsoleLogger::ShouldLog( \
|
||||
::xgboost::ConsoleLogger::LV::kInfo)) \
|
||||
::xgboost::ConsoleLogger(__FILE__, __LINE__, \
|
||||
::xgboost::ConsoleLogger::LogVerbosity::kInfo)
|
||||
|
||||
#if defined(LOG_DEBUG)
|
||||
#undef LOG_DEBUG
|
||||
#endif // defined(LOG_DEBUG)
|
||||
#define LOG_DEBUG \
|
||||
if (::xgboost::ConsoleLogger::ShouldLog( \
|
||||
::xgboost::ConsoleLogger::LV::kDebug)) \
|
||||
::xgboost::ConsoleLogger(__FILE__, __LINE__, \
|
||||
::xgboost::ConsoleLogger::LogVerbosity::kDebug)
|
||||
|
||||
// redefines the logging macro if not existed
|
||||
#ifndef LOG
|
||||
#define LOG(severity) LOG_##severity.stream()
|
||||
#endif
|
||||
#endif // LOG
|
||||
|
||||
// Enable LOG(CONSOLE) for print messages to console.
|
||||
#define LOG_CONSOLE ::xgboost::ConsoleLogger()
|
||||
#define LOG_CONSOLE ::xgboost::ConsoleLogger( \
|
||||
::xgboost::ConsoleLogger::LogVerbosity::kIgnore)
|
||||
// Enable LOG(TRACKER) for print messages to tracker
|
||||
#define LOG_TRACKER ::xgboost::TrackerLogger()
|
||||
} // namespace xgboost.
|
||||
|
||||
@@ -11,8 +11,11 @@
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <functional>
|
||||
#include <utility>
|
||||
|
||||
#include "./data.h"
|
||||
#include "./base.h"
|
||||
#include "../../src/common/host_device_vector.h"
|
||||
|
||||
namespace xgboost {
|
||||
/*!
|
||||
@@ -21,6 +24,23 @@ namespace xgboost {
|
||||
*/
|
||||
class Metric {
|
||||
public:
|
||||
/*!
|
||||
* \brief Configure the Metric with the specified parameters.
|
||||
* \param args arguments to the objective function.
|
||||
*/
|
||||
virtual void Configure(
|
||||
const std::vector<std::pair<std::string, std::string> >& args) {}
|
||||
/*!
|
||||
* \brief set configuration from pair iterators.
|
||||
* \param begin The beginning iterator.
|
||||
* \param end The end iterator.
|
||||
* \tparam PairIter iterator<std::pair<std::string, std::string> >
|
||||
*/
|
||||
template<typename PairIter>
|
||||
inline void Configure(PairIter begin, PairIter end) {
|
||||
std::vector<std::pair<std::string, std::string> > vec(begin, end);
|
||||
this->Configure(vec);
|
||||
}
|
||||
/*!
|
||||
* \brief evaluate a specific metric
|
||||
* \param preds prediction
|
||||
@@ -29,9 +49,9 @@ class Metric {
|
||||
* the average statistics across all the node,
|
||||
* this is only supported by some metrics
|
||||
*/
|
||||
virtual bst_float Eval(const std::vector<bst_float>& preds,
|
||||
virtual bst_float Eval(const HostDeviceVector<bst_float>& preds,
|
||||
const MetaInfo& info,
|
||||
bool distributed) const = 0;
|
||||
bool distributed) = 0;
|
||||
/*! \return name of metric */
|
||||
virtual const char* Name() const = 0;
|
||||
/*! \brief virtual destructor */
|
||||
|
||||
@@ -7,11 +7,14 @@
|
||||
#pragma once
|
||||
#include <xgboost/base.h>
|
||||
#include <xgboost/data.h>
|
||||
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "../../src/gbm/gbtree_model.h"
|
||||
#include "../../src/common/host_device_vector.h"
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -19,7 +19,7 @@ CONFIG = {
|
||||
"USE_AZURE": "OFF",
|
||||
"USE_S3": "OFF",
|
||||
|
||||
"PLUGIN_UPDATER_GPU": "OFF",
|
||||
"USE_CUDA": "OFF",
|
||||
"JVM_BINDINGS": "ON"
|
||||
}
|
||||
|
||||
|
||||
@@ -17,5 +17,5 @@ rm /usr/bin/python
|
||||
ln -s /opt/rh/python27/root/usr/bin/python /usr/bin/python
|
||||
|
||||
# build xgboost
|
||||
cd /xgboost/jvm-packages;mvn package
|
||||
cd /xgboost/jvm-packages;ulimit -c unlimited;mvn package
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost-jvm</artifactId>
|
||||
<version>0.81</version>
|
||||
<version>0.82-SNAPSHOT</version>
|
||||
<packaging>pom</packaging>
|
||||
<name>XGBoost JVM Package</name>
|
||||
<description>JVM Package for XGBoost</description>
|
||||
@@ -34,7 +34,7 @@
|
||||
<maven.compiler.source>1.7</maven.compiler.source>
|
||||
<maven.compiler.target>1.7</maven.compiler.target>
|
||||
<flink.version>1.5.0</flink.version>
|
||||
<spark.version>2.3.1</spark.version>
|
||||
<spark.version>2.3.3</spark.version>
|
||||
<scala.version>2.11.12</scala.version>
|
||||
<scala.binary.version>2.11</scala.binary.version>
|
||||
</properties>
|
||||
@@ -237,7 +237,7 @@
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>net.alchim31.maven</groupId>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-site-plugin</artifactId>
|
||||
<version>3.0</version>
|
||||
<configuration>
|
||||
@@ -335,25 +335,6 @@
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.jacoco</groupId>
|
||||
<artifactId>jacoco-maven-plugin</artifactId>
|
||||
<version>0.7.9</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<goals>
|
||||
<goal>prepare-agent</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>report</id>
|
||||
<phase>test</phase>
|
||||
<goals>
|
||||
<goal>report</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
<dependencies>
|
||||
|
||||
@@ -23,7 +23,7 @@ XGBoost4J Code Examples
|
||||
* [External Memory](src/main/scala/ml/dmlc/xgboost4j/scala/example/ExternalMemory.scala)
|
||||
|
||||
## Spark API
|
||||
* [Distributed Training with Spark](src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/SparkWithDataFrame.scala)
|
||||
* [Distributed Training with Spark](src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/SparkMLlibPipeline.scala)
|
||||
|
||||
## Flink API
|
||||
* [Distributed Training with Flink](src/main/scala/ml/dmlc/xgboost4j/scala/example/flink/DistTrainWithFlink.scala)
|
||||
|
||||
@@ -6,10 +6,10 @@
|
||||
<parent>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost-jvm</artifactId>
|
||||
<version>0.81</version>
|
||||
<version>0.82-SNAPSHOT</version>
|
||||
</parent>
|
||||
<artifactId>xgboost4j-example</artifactId>
|
||||
<version>0.81</version>
|
||||
<version>0.82-SNAPSHOT</version>
|
||||
<packaging>jar</packaging>
|
||||
<build>
|
||||
<plugins>
|
||||
@@ -26,7 +26,7 @@
|
||||
<dependency>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost4j-spark</artifactId>
|
||||
<version>0.81</version>
|
||||
<version>0.82-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.spark</groupId>
|
||||
@@ -37,7 +37,7 @@
|
||||
<dependency>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost4j-flink</artifactId>
|
||||
<version>0.81</version>
|
||||
<version>0.82-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
|
||||
@@ -31,7 +31,6 @@ object SparkTraining {
|
||||
println("Usage: program input_path")
|
||||
sys.exit(1)
|
||||
}
|
||||
|
||||
val spark = SparkSession.builder().getOrCreate()
|
||||
val inputPath = args(0)
|
||||
val schema = new StructType(Array(
|
||||
@@ -40,7 +39,7 @@ object SparkTraining {
|
||||
StructField("petal length", DoubleType, true),
|
||||
StructField("petal width", DoubleType, true),
|
||||
StructField("class", StringType, true)))
|
||||
val rawInput = spark.read.schema(schema).csv(args(0))
|
||||
val rawInput = spark.read.schema(schema).csv(inputPath)
|
||||
|
||||
// transform class to index to make xgboost happy
|
||||
val stringIndexer = new StringIndexer()
|
||||
@@ -55,6 +54,8 @@ object SparkTraining {
|
||||
val xgbInput = vectorAssembler.transform(labelTransformed).select("features",
|
||||
"classIndex")
|
||||
|
||||
val Array(train, eval1, eval2, test) = xgbInput.randomSplit(Array(0.6, 0.2, 0.1, 0.1))
|
||||
|
||||
/**
|
||||
* setup "timeout_request_workers" -> 60000L to make this application if it cannot get enough resources
|
||||
* to get 2 workers within 60000 ms
|
||||
@@ -67,12 +68,13 @@ object SparkTraining {
|
||||
"objective" -> "multi:softprob",
|
||||
"num_class" -> 3,
|
||||
"num_round" -> 100,
|
||||
"num_workers" -> 2)
|
||||
"num_workers" -> 2,
|
||||
"eval_sets" -> Map("eval1" -> eval1, "eval2" -> eval2))
|
||||
val xgbClassifier = new XGBoostClassifier(xgbParam).
|
||||
setFeaturesCol("features").
|
||||
setLabelCol("classIndex")
|
||||
val xgbClassificationModel = xgbClassifier.fit(xgbInput)
|
||||
val results = xgbClassificationModel.transform(xgbInput)
|
||||
val xgbClassificationModel = xgbClassifier.fit(train)
|
||||
val results = xgbClassificationModel.transform(test)
|
||||
results.show()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,10 +6,10 @@
|
||||
<parent>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost-jvm</artifactId>
|
||||
<version>0.81</version>
|
||||
<version>0.82-SNAPSHOT</version>
|
||||
</parent>
|
||||
<artifactId>xgboost4j-flink</artifactId>
|
||||
<version>0.81</version>
|
||||
<version>0.82-SNAPSHOT</version>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
@@ -26,7 +26,7 @@
|
||||
<dependency>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost4j</artifactId>
|
||||
<version>0.81</version>
|
||||
<version>0.82-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
<parent>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost-jvm</artifactId>
|
||||
<version>0.81</version>
|
||||
<version>0.82-SNAPSHOT</version>
|
||||
</parent>
|
||||
<artifactId>xgboost4j-spark</artifactId>
|
||||
<build>
|
||||
@@ -24,7 +24,7 @@
|
||||
<dependency>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost4j</artifactId>
|
||||
<version>0.81</version>
|
||||
<version>0.82-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.spark</groupId>
|
||||
|
||||
@@ -20,6 +20,11 @@ import ml.dmlc.xgboost4j.{LabeledPoint => XGBLabeledPoint}
|
||||
|
||||
import org.apache.spark.ml.feature.{LabeledPoint => MLLabeledPoint}
|
||||
import org.apache.spark.ml.linalg.{DenseVector, SparseVector, Vector, Vectors}
|
||||
import org.apache.spark.ml.param.Param
|
||||
import org.apache.spark.rdd.RDD
|
||||
import org.apache.spark.sql.{Column, DataFrame, Row}
|
||||
import org.apache.spark.sql.functions.col
|
||||
import org.apache.spark.sql.types.{FloatType, IntegerType}
|
||||
|
||||
object DataUtils extends Serializable {
|
||||
private[spark] implicit class XGBLabeledPointFeatures(
|
||||
@@ -67,4 +72,38 @@ object DataUtils extends Serializable {
|
||||
XGBLabeledPoint(0.0f, v.indices, v.values.map(_.toFloat))
|
||||
}
|
||||
}
|
||||
|
||||
private[spark] def convertDataFrameToXGBLabeledPointRDDs(
|
||||
labelCol: Column,
|
||||
featuresCol: Column,
|
||||
weight: Column,
|
||||
baseMargin: Column,
|
||||
group: Option[Column],
|
||||
dataFrames: DataFrame*): Array[RDD[XGBLabeledPoint]] = {
|
||||
val selectedColumns = group.map(groupCol => Seq(labelCol.cast(FloatType),
|
||||
featuresCol,
|
||||
weight.cast(FloatType),
|
||||
groupCol.cast(IntegerType),
|
||||
baseMargin.cast(FloatType))).getOrElse(Seq(labelCol.cast(FloatType),
|
||||
featuresCol,
|
||||
weight.cast(FloatType),
|
||||
baseMargin.cast(FloatType)))
|
||||
dataFrames.toArray.map {
|
||||
df => df.select(selectedColumns: _*).rdd.map {
|
||||
case Row(label: Float, features: Vector, weight: Float, group: Int, baseMargin: Float) =>
|
||||
val (indices, values) = features match {
|
||||
case v: SparseVector => (v.indices, v.values.map(_.toFloat))
|
||||
case v: DenseVector => (null, v.values.map(_.toFloat))
|
||||
}
|
||||
XGBLabeledPoint(label, indices, values, weight, group, baseMargin)
|
||||
case Row(label: Float, features: Vector, weight: Float, baseMargin: Float) =>
|
||||
val (indices, values) = features match {
|
||||
case v: SparseVector => (v.indices, v.values.map(_.toFloat))
|
||||
case v: DenseVector => (null, v.values.map(_.toFloat))
|
||||
}
|
||||
XGBLabeledPoint(label, indices, values, weight, baseMargin = baseMargin)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ package ml.dmlc.xgboost4j.scala.spark
|
||||
import java.io.File
|
||||
import java.nio.file.Files
|
||||
|
||||
import scala.collection.mutable.ListBuffer
|
||||
import scala.collection.{AbstractIterator, mutable}
|
||||
import scala.util.Random
|
||||
|
||||
@@ -26,12 +27,12 @@ import ml.dmlc.xgboost4j.java.{IRabitTracker, Rabit, XGBoostError, RabitTracker
|
||||
import ml.dmlc.xgboost4j.scala.rabit.RabitTracker
|
||||
import ml.dmlc.xgboost4j.scala.{XGBoost => SXGBoost, _}
|
||||
import ml.dmlc.xgboost4j.{LabeledPoint => XGBLabeledPoint}
|
||||
|
||||
import org.apache.commons.io.FileUtils
|
||||
import org.apache.commons.logging.LogFactory
|
||||
|
||||
import org.apache.spark.rdd.RDD
|
||||
import org.apache.spark.{SparkContext, SparkParallelismTracker, TaskContext}
|
||||
import org.apache.spark.sql.SparkSession
|
||||
import org.apache.spark.sql.{DataFrame, SparkSession}
|
||||
|
||||
|
||||
/**
|
||||
@@ -114,13 +115,12 @@ object XGBoost extends Serializable {
|
||||
round: Int,
|
||||
obj: ObjectiveTrait,
|
||||
eval: EvalTrait,
|
||||
prevBooster: Booster)
|
||||
: Iterator[(Booster, Map[String, Array[Float]])] = {
|
||||
prevBooster: Booster): Iterator[(Booster, Map[String, Array[Float]])] = {
|
||||
|
||||
// to workaround the empty partitions in training dataset,
|
||||
// this might not be the best efficient implementation, see
|
||||
// (https://github.com/dmlc/xgboost/issues/1277)
|
||||
if (watches.train.rowNum == 0) {
|
||||
if (watches.toMap("train").rowNum == 0) {
|
||||
throw new XGBoostError(
|
||||
s"detected an empty partition in the training data, partition ID:" +
|
||||
s" ${TaskContext.getPartitionId()}")
|
||||
@@ -138,7 +138,7 @@ object XGBoost extends Serializable {
|
||||
}
|
||||
}
|
||||
val metrics = Array.tabulate(watches.size)(_ => Array.ofDim[Float](round))
|
||||
val booster = SXGBoost.train(watches.train, params, round,
|
||||
val booster = SXGBoost.train(watches.toMap("train"), params, round,
|
||||
watches.toMap, metrics, obj, eval,
|
||||
earlyStoppingRound = numEarlyStoppingRounds, prevBooster)
|
||||
Iterator(booster -> watches.toMap.keys.zip(metrics).toMap)
|
||||
@@ -175,6 +175,52 @@ object XGBoost extends Serializable {
|
||||
tracker
|
||||
}
|
||||
|
||||
class IteratorWrapper[T](arrayOfXGBLabeledPoints: Array[(String, Iterator[T])])
|
||||
extends Iterator[(String, Iterator[T])] {
|
||||
|
||||
private var currentIndex = 0
|
||||
|
||||
override def hasNext: Boolean = currentIndex <= arrayOfXGBLabeledPoints.length - 1
|
||||
|
||||
override def next(): (String, Iterator[T]) = {
|
||||
currentIndex += 1
|
||||
arrayOfXGBLabeledPoints(currentIndex - 1)
|
||||
}
|
||||
}
|
||||
|
||||
private def coPartitionNoGroupSets(
|
||||
trainingData: RDD[XGBLabeledPoint],
|
||||
evalSets: Map[String, RDD[XGBLabeledPoint]],
|
||||
nWorkers: Int) = {
|
||||
// eval_sets is supposed to be set by the caller of [[trainDistributed]]
|
||||
val allDatasets = Map("train" -> trainingData) ++ evalSets
|
||||
val repartitionedDatasets = allDatasets.map{case (name, rdd) =>
|
||||
if (rdd.getNumPartitions != nWorkers) {
|
||||
(name, rdd.repartition(nWorkers))
|
||||
} else {
|
||||
(name, rdd)
|
||||
}
|
||||
}
|
||||
repartitionedDatasets.foldLeft(trainingData.sparkContext.parallelize(
|
||||
Array.fill[(String, Iterator[XGBLabeledPoint])](nWorkers)(null), nWorkers)){
|
||||
case (rddOfIterWrapper, (name, rddOfIter)) =>
|
||||
rddOfIterWrapper.zipPartitions(rddOfIter){
|
||||
(itrWrapper, itr) =>
|
||||
if (!itr.hasNext) {
|
||||
logger.error("when specifying eval sets as dataframes, you have to ensure that " +
|
||||
"the number of elements in each dataframe is larger than the number of workers")
|
||||
throw new Exception("too few elements in evaluation sets")
|
||||
}
|
||||
val itrArray = itrWrapper.toArray
|
||||
if (itrArray.head != null) {
|
||||
new IteratorWrapper(itrArray :+ (name -> itr))
|
||||
} else {
|
||||
new IteratorWrapper(Array(name -> itr))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check to see if Spark expects SSL encryption (`spark.ssl.enabled` set to true).
|
||||
* If so, throw an exception unless this safety measure has been explicitly overridden
|
||||
@@ -207,24 +253,25 @@ object XGBoost extends Serializable {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @return A tuple of the booster and the metrics used to build training summary
|
||||
*/
|
||||
@throws(classOf[XGBoostError])
|
||||
private[spark] def trainDistributed(
|
||||
trainingData: RDD[XGBLabeledPoint],
|
||||
params: Map[String, Any],
|
||||
round: Int,
|
||||
nWorkers: Int,
|
||||
obj: ObjectiveTrait = null,
|
||||
eval: EvalTrait = null,
|
||||
useExternalMemory: Boolean = false,
|
||||
missing: Float = Float.NaN,
|
||||
hasGroup: Boolean = false): (Booster, Map[String, Array[Float]]) = {
|
||||
validateSparkSslConf(trainingData.context)
|
||||
private def parameterFetchAndValidation(params: Map[String, Any], sparkContext: SparkContext) = {
|
||||
val nWorkers = params("num_workers").asInstanceOf[Int]
|
||||
val round = params("num_round").asInstanceOf[Int]
|
||||
val useExternalMemory = params("use_external_memory").asInstanceOf[Boolean]
|
||||
val obj = params.getOrElse("custom_obj", null).asInstanceOf[ObjectiveTrait]
|
||||
val eval = params.getOrElse("custom_eval", null).asInstanceOf[EvalTrait]
|
||||
val missing = params.getOrElse("missing", Float.NaN).asInstanceOf[Float]
|
||||
validateSparkSslConf(sparkContext)
|
||||
|
||||
if (params.contains("tree_method")) {
|
||||
require(params("tree_method") != "hist", "xgboost4j-spark does not support fast histogram" +
|
||||
" for now")
|
||||
require(params("tree_method") == "hist" ||
|
||||
params("tree_method") == "approx" ||
|
||||
params("tree_method") == "auto", "xgboost4j-spark only supports tree_method as 'hist'," +
|
||||
" 'approx' and 'auto'")
|
||||
}
|
||||
if (params.contains("train_test_ratio")) {
|
||||
logger.warn("train_test_ratio is deprecated since XGBoost 0.82, we recommend to explicitly" +
|
||||
" pass a training and multiple evaluation datasets by passing 'eval_sets' and " +
|
||||
"'eval_set_names'")
|
||||
}
|
||||
require(nWorkers > 0, "you must specify more than 0 workers")
|
||||
if (obj != null) {
|
||||
@@ -245,11 +292,89 @@ object XGBoost extends Serializable {
|
||||
" an instance of Long.")
|
||||
}
|
||||
val (checkpointPath, checkpointInterval) = CheckpointManager.extractParams(params)
|
||||
(nWorkers, round, useExternalMemory, obj, eval, missing, trackerConf, timeoutRequestWorkers,
|
||||
checkpointPath, checkpointInterval)
|
||||
}
|
||||
|
||||
private def trainForNonRanking(
|
||||
trainingData: RDD[XGBLabeledPoint],
|
||||
params: Map[String, Any],
|
||||
rabitEnv: java.util.Map[String, String],
|
||||
checkpointRound: Int,
|
||||
prevBooster: Booster,
|
||||
evalSetsMap: Map[String, RDD[XGBLabeledPoint]]): RDD[(Booster, Map[String, Array[Float]])] = {
|
||||
val (nWorkers, _, useExternalMemory, obj, eval, missing, _, _, _, _) =
|
||||
parameterFetchAndValidation(params, trainingData.sparkContext)
|
||||
val partitionedData = repartitionForTraining(trainingData, nWorkers)
|
||||
if (evalSetsMap.isEmpty) {
|
||||
partitionedData.mapPartitions(labeledPoints => {
|
||||
val watches = Watches.buildWatches(params,
|
||||
removeMissingValues(labeledPoints, missing),
|
||||
getCacheDirName(useExternalMemory))
|
||||
buildDistributedBooster(watches, params, rabitEnv, checkpointRound,
|
||||
obj, eval, prevBooster)
|
||||
}).cache()
|
||||
} else {
|
||||
coPartitionNoGroupSets(partitionedData, evalSetsMap, nWorkers).mapPartitions {
|
||||
nameAndLabeledPointSets =>
|
||||
val watches = Watches.buildWatches(
|
||||
nameAndLabeledPointSets.map {
|
||||
case (name, iter) => (name, removeMissingValues(iter, missing))},
|
||||
getCacheDirName(useExternalMemory))
|
||||
buildDistributedBooster(watches, params, rabitEnv, checkpointRound,
|
||||
obj, eval, prevBooster)
|
||||
}.cache()
|
||||
}
|
||||
}
|
||||
|
||||
private def trainForRanking(
|
||||
trainingData: RDD[XGBLabeledPoint],
|
||||
params: Map[String, Any],
|
||||
rabitEnv: java.util.Map[String, String],
|
||||
checkpointRound: Int,
|
||||
prevBooster: Booster,
|
||||
evalSetsMap: Map[String, RDD[XGBLabeledPoint]]): RDD[(Booster, Map[String, Array[Float]])] = {
|
||||
val (nWorkers, _, useExternalMemory, obj, eval, missing, _, _, _, _) =
|
||||
parameterFetchAndValidation(params, trainingData.sparkContext)
|
||||
val partitionedTrainingSet = repartitionForTrainingGroup(trainingData, nWorkers)
|
||||
if (evalSetsMap.isEmpty) {
|
||||
partitionedTrainingSet.mapPartitions(labeledPointGroups => {
|
||||
val watches = Watches.buildWatchesWithGroup(params,
|
||||
removeMissingValuesWithGroup(labeledPointGroups, missing),
|
||||
getCacheDirName(useExternalMemory))
|
||||
buildDistributedBooster(watches, params, rabitEnv, checkpointRound, obj, eval, prevBooster)
|
||||
}).cache()
|
||||
} else {
|
||||
coPartitionGroupSets(partitionedTrainingSet, evalSetsMap, nWorkers).mapPartitions(
|
||||
labeledPointGroupSets => {
|
||||
val watches = Watches.buildWatchesWithGroup(
|
||||
labeledPointGroupSets.map {
|
||||
case (name, iter) => (name, removeMissingValuesWithGroup(iter, missing))
|
||||
},
|
||||
getCacheDirName(useExternalMemory))
|
||||
buildDistributedBooster(watches, params, rabitEnv, checkpointRound, obj, eval,
|
||||
prevBooster)
|
||||
}).cache()
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @return A tuple of the booster and the metrics used to build training summary
|
||||
*/
|
||||
@throws(classOf[XGBoostError])
|
||||
private[spark] def trainDistributed(
|
||||
trainingData: RDD[XGBLabeledPoint],
|
||||
params: Map[String, Any],
|
||||
hasGroup: Boolean = false,
|
||||
evalSetsMap: Map[String, RDD[XGBLabeledPoint]] = Map()):
|
||||
(Booster, Map[String, Array[Float]]) = {
|
||||
logger.info(s"XGBoost training with parameters:\n${params.mkString("\n")}")
|
||||
val (nWorkers, round, _, _, _, _, trackerConf, timeoutRequestWorkers,
|
||||
checkpointPath, checkpointInterval) = parameterFetchAndValidation(params,
|
||||
trainingData.sparkContext)
|
||||
val sc = trainingData.sparkContext
|
||||
val checkpointManager = new CheckpointManager(sc, checkpointPath)
|
||||
checkpointManager.cleanUpHigherVersions(round)
|
||||
|
||||
checkpointManager.cleanUpHigherVersions(round.asInstanceOf[Int])
|
||||
var prevBooster = checkpointManager.loadCheckpointAsBooster
|
||||
// Train for every ${savingRound} rounds and save the partially completed booster
|
||||
checkpointManager.getCheckpointRounds(checkpointInterval, round).map {
|
||||
@@ -259,27 +384,12 @@ object XGBoost extends Serializable {
|
||||
val overriddenParams = overrideParamsAccordingToTaskCPUs(params, sc)
|
||||
val parallelismTracker = new SparkParallelismTracker(sc, timeoutRequestWorkers, nWorkers)
|
||||
val rabitEnv = tracker.getWorkerEnvs
|
||||
val boostersAndMetrics = hasGroup match {
|
||||
case true => {
|
||||
val partitionedData = repartitionForTrainingGroup(trainingData, nWorkers)
|
||||
partitionedData.mapPartitions(labeledPointGroups => {
|
||||
val watches = Watches.buildWatchesWithGroup(overriddenParams,
|
||||
removeMissingValuesWithGroup(labeledPointGroups, missing),
|
||||
getCacheDirName(useExternalMemory))
|
||||
buildDistributedBooster(watches, overriddenParams, rabitEnv, checkpointRound,
|
||||
obj, eval, prevBooster)
|
||||
}).cache()
|
||||
}
|
||||
case false => {
|
||||
val partitionedData = repartitionForTraining(trainingData, nWorkers)
|
||||
partitionedData.mapPartitions(labeledPoints => {
|
||||
val watches = Watches.buildWatches(overriddenParams,
|
||||
removeMissingValues(labeledPoints, missing),
|
||||
getCacheDirName(useExternalMemory))
|
||||
buildDistributedBooster(watches, overriddenParams, rabitEnv, checkpointRound,
|
||||
obj, eval, prevBooster)
|
||||
}).cache()
|
||||
}
|
||||
val boostersAndMetrics = if (hasGroup) {
|
||||
trainForRanking(trainingData, overriddenParams, rabitEnv, checkpointRound,
|
||||
prevBooster, evalSetsMap)
|
||||
} else {
|
||||
trainForNonRanking(trainingData, overriddenParams, rabitEnv, checkpointRound,
|
||||
prevBooster, evalSetsMap)
|
||||
}
|
||||
val sparkJobThread = new Thread() {
|
||||
override def run() {
|
||||
@@ -313,8 +423,7 @@ object XGBoost extends Serializable {
|
||||
}
|
||||
}
|
||||
|
||||
private[spark] def repartitionForTrainingGroup(
|
||||
trainingData: RDD[XGBLabeledPoint], nWorkers: Int): RDD[Array[XGBLabeledPoint]] = {
|
||||
private def aggByGroupInfo(trainingData: RDD[XGBLabeledPoint]) = {
|
||||
val normalGroups: RDD[Array[XGBLabeledPoint]] = trainingData.mapPartitions(
|
||||
// LabeledPointGroupIterator returns (Boolean, Array[XGBLabeledPoint])
|
||||
new LabeledPointGroupIterator(_)).filter(!_.isEdgeGroup).map(_.points)
|
||||
@@ -322,22 +431,60 @@ object XGBoost extends Serializable {
|
||||
// edge groups with partition id.
|
||||
val edgeGroups: RDD[(Int, XGBLabeledPointGroup)] = trainingData.mapPartitions(
|
||||
new LabeledPointGroupIterator(_)).filter(_.isEdgeGroup).map(
|
||||
group => (TaskContext.getPartitionId(), group))
|
||||
group => (TaskContext.getPartitionId(), group))
|
||||
|
||||
// group chunks from different partitions together by group id in XGBLabeledPoint.
|
||||
// use groupBy instead of aggregateBy since all groups within a partition have unique groud ids.
|
||||
// use groupBy instead of aggregateBy since all groups within a partition have unique group ids.
|
||||
val stitchedGroups: RDD[Array[XGBLabeledPoint]] = edgeGroups.groupBy(_._2.groupId).map(
|
||||
groups => {
|
||||
val it: Iterable[(Int, XGBLabeledPointGroup)] = groups._2
|
||||
// sorted by partition id and merge list of Array[XGBLabeledPoint] into one array
|
||||
it.toArray.sortBy(_._1).map(_._2.points).flatten
|
||||
it.toArray.sortBy(_._1).flatMap(_._2.points)
|
||||
})
|
||||
normalGroups.union(stitchedGroups)
|
||||
}
|
||||
|
||||
var allGroups = normalGroups.union(stitchedGroups)
|
||||
private[spark] def repartitionForTrainingGroup(
|
||||
trainingData: RDD[XGBLabeledPoint], nWorkers: Int): RDD[Array[XGBLabeledPoint]] = {
|
||||
val allGroups = aggByGroupInfo(trainingData)
|
||||
logger.info(s"repartitioning training group set to $nWorkers partitions")
|
||||
allGroups.repartition(nWorkers)
|
||||
}
|
||||
|
||||
private def coPartitionGroupSets(
|
||||
aggedTrainingSet: RDD[Array[XGBLabeledPoint]],
|
||||
evalSets: Map[String, RDD[XGBLabeledPoint]],
|
||||
nWorkers: Int): RDD[(String, Iterator[Array[XGBLabeledPoint]])] = {
|
||||
val repartitionedDatasets = Map("train" -> aggedTrainingSet) ++ evalSets.map {
|
||||
case (name, rdd) => {
|
||||
val aggedRdd = aggByGroupInfo(rdd)
|
||||
if (aggedRdd.getNumPartitions != nWorkers) {
|
||||
name -> aggedRdd.repartition(nWorkers)
|
||||
} else {
|
||||
name -> aggedRdd
|
||||
}
|
||||
}
|
||||
}
|
||||
repartitionedDatasets.foldLeft(aggedTrainingSet.sparkContext.parallelize(
|
||||
Array.fill[(String, Iterator[Array[XGBLabeledPoint]])](nWorkers)(null), nWorkers)){
|
||||
case (rddOfIterWrapper, (name, rddOfIter)) =>
|
||||
rddOfIterWrapper.zipPartitions(rddOfIter){
|
||||
(itrWrapper, itr) =>
|
||||
if (!itr.hasNext) {
|
||||
logger.error("when specifying eval sets as dataframes, you have to ensure that " +
|
||||
"the number of elements in each dataframe is larger than the number of workers")
|
||||
throw new Exception("too few elements in evaluation sets")
|
||||
}
|
||||
val itrArray = itrWrapper.toArray
|
||||
if (itrArray.head != null) {
|
||||
new IteratorWrapper(itrArray :+ (name -> itr))
|
||||
} else {
|
||||
new IteratorWrapper(Array(name -> itr))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private def postTrackerReturnProcessing(
|
||||
trackerReturnVal: Int,
|
||||
distributedBoostersAndMetrics: RDD[(Booster, Map[String, Array[Float]])],
|
||||
@@ -368,12 +515,13 @@ object XGBoost extends Serializable {
|
||||
}
|
||||
|
||||
private class Watches private(
|
||||
val train: DMatrix,
|
||||
val test: DMatrix,
|
||||
private val cacheDirName: Option[String]) {
|
||||
val datasets: Array[DMatrix],
|
||||
val names: Array[String],
|
||||
val cacheDirName: Option[String]) {
|
||||
|
||||
def toMap: Map[String, DMatrix] = Map("train" -> train, "test" -> test)
|
||||
.filter { case (_, matrix) => matrix.rowNum > 0 }
|
||||
def toMap: Map[String, DMatrix] = {
|
||||
names.zip(datasets).toMap.filter { case (_, matrix) => matrix.rowNum > 0 }
|
||||
}
|
||||
|
||||
def size: Int = toMap.size
|
||||
|
||||
@@ -413,6 +561,26 @@ private object Watches {
|
||||
}
|
||||
}
|
||||
|
||||
def buildWatches(
|
||||
nameAndLabeledPointSets: Iterator[(String, Iterator[XGBLabeledPoint])],
|
||||
cachedDirName: Option[String]): Watches = {
|
||||
val dms = nameAndLabeledPointSets.map {
|
||||
case (name, labeledPoints) =>
|
||||
val baseMargins = new mutable.ArrayBuilder.ofFloat
|
||||
val duplicatedItr = labeledPoints.map(labeledPoint => {
|
||||
baseMargins += labeledPoint.baseMargin
|
||||
labeledPoint
|
||||
})
|
||||
val dMatrix = new DMatrix(duplicatedItr, cachedDirName.map(_ + s"/$name").orNull)
|
||||
val baseMargin = fromBaseMarginsToArray(baseMargins.result().iterator)
|
||||
if (baseMargin.isDefined) {
|
||||
dMatrix.setBaseMargin(baseMargin.get)
|
||||
}
|
||||
(name, dMatrix)
|
||||
}.toArray
|
||||
new Watches(dms.map(_._2), dms.map(_._1), cachedDirName)
|
||||
}
|
||||
|
||||
def buildWatches(
|
||||
params: Map[String, Any],
|
||||
labeledPoints: Iterator[XGBLabeledPoint],
|
||||
@@ -441,7 +609,46 @@ private object Watches {
|
||||
if (trainMargin.isDefined) trainMatrix.setBaseMargin(trainMargin.get)
|
||||
if (testMargin.isDefined) testMatrix.setBaseMargin(testMargin.get)
|
||||
|
||||
new Watches(trainMatrix, testMatrix, cacheDirName)
|
||||
new Watches(Array(trainMatrix, testMatrix), Array("train", "test"), cacheDirName)
|
||||
}
|
||||
|
||||
def buildWatchesWithGroup(
|
||||
nameAndlabeledPointGroupSets: Iterator[(String, Iterator[Array[XGBLabeledPoint]])],
|
||||
cachedDirName: Option[String]): Watches = {
|
||||
val dms = nameAndlabeledPointGroupSets.map {
|
||||
case (name, labeledPointsGroups) =>
|
||||
val baseMargins = new mutable.ArrayBuilder.ofFloat
|
||||
val groupsInfo = new mutable.ArrayBuilder.ofInt
|
||||
val weights = new mutable.ArrayBuilder.ofFloat
|
||||
val iter = labeledPointsGroups.filter(labeledPointGroup => {
|
||||
var groupWeight = -1.0f
|
||||
var groupSize = 0
|
||||
labeledPointGroup.map { labeledPoint => {
|
||||
if (groupWeight < 0) {
|
||||
groupWeight = labeledPoint.weight
|
||||
} else if (groupWeight != labeledPoint.weight) {
|
||||
throw new IllegalArgumentException("the instances in the same group have to be" +
|
||||
s" assigned with the same weight (unexpected weight ${labeledPoint.weight}")
|
||||
}
|
||||
baseMargins += labeledPoint.baseMargin
|
||||
groupSize += 1
|
||||
labeledPoint
|
||||
}
|
||||
}
|
||||
weights += groupWeight
|
||||
groupsInfo += groupSize
|
||||
true
|
||||
})
|
||||
val dMatrix = new DMatrix(iter.flatMap(_.iterator), cachedDirName.map(_ + s"/$name").orNull)
|
||||
val baseMargin = fromBaseMarginsToArray(baseMargins.result().iterator)
|
||||
if (baseMargin.isDefined) {
|
||||
dMatrix.setBaseMargin(baseMargin.get)
|
||||
}
|
||||
dMatrix.setGroup(groupsInfo.result())
|
||||
dMatrix.setWeight(weights.result())
|
||||
(name, dMatrix)
|
||||
}.toArray
|
||||
new Watches(dms.map(_._2), dms.map(_._1), cachedDirName)
|
||||
}
|
||||
|
||||
def buildWatchesWithGroup(
|
||||
@@ -454,20 +661,46 @@ private object Watches {
|
||||
val testPoints = mutable.ArrayBuilder.make[XGBLabeledPoint]
|
||||
val trainBaseMargins = new mutable.ArrayBuilder.ofFloat
|
||||
val testBaseMargins = new mutable.ArrayBuilder.ofFloat
|
||||
|
||||
val trainGroups = new mutable.ArrayBuilder.ofInt
|
||||
val testGroups = new mutable.ArrayBuilder.ofInt
|
||||
|
||||
val trainWeights = new mutable.ArrayBuilder.ofFloat
|
||||
val testWeights = new mutable.ArrayBuilder.ofFloat
|
||||
|
||||
val trainLabelPointGroups = labeledPointGroups.filter { labeledPointGroup =>
|
||||
val accepted = r.nextDouble() <= trainTestRatio
|
||||
if (!accepted) {
|
||||
var groupWeight = -1.0f
|
||||
var groupSize = 0
|
||||
labeledPointGroup.foreach(labeledPoint => {
|
||||
testPoints += labeledPoint
|
||||
testBaseMargins += labeledPoint.baseMargin
|
||||
if (groupWeight < 0) {
|
||||
groupWeight = labeledPoint.weight
|
||||
} else if (labeledPoint.weight != groupWeight) {
|
||||
throw new IllegalArgumentException("the instances in the same group have to be" +
|
||||
s" assigned with the same weight (unexpected weight ${labeledPoint.weight}")
|
||||
}
|
||||
groupSize += 1
|
||||
})
|
||||
testGroups += labeledPointGroup.length
|
||||
testWeights += groupWeight
|
||||
testGroups += groupSize
|
||||
} else {
|
||||
labeledPointGroup.foreach(trainBaseMargins += _.baseMargin)
|
||||
trainGroups += labeledPointGroup.length
|
||||
var groupWeight = -1.0f
|
||||
var groupSize = 0
|
||||
labeledPointGroup.foreach { labeledPoint => {
|
||||
if (groupWeight < 0) {
|
||||
groupWeight = labeledPoint.weight
|
||||
} else if (labeledPoint.weight != groupWeight) {
|
||||
throw new IllegalArgumentException("the instances in the same group have to be" +
|
||||
s" assigned with the same weight (unexpected weight ${labeledPoint.weight}")
|
||||
}
|
||||
trainBaseMargins += labeledPoint.baseMargin
|
||||
groupSize += 1
|
||||
}}
|
||||
trainWeights += groupWeight
|
||||
trainGroups += groupSize
|
||||
}
|
||||
accepted
|
||||
}
|
||||
@@ -475,10 +708,12 @@ private object Watches {
|
||||
val trainPoints = trainLabelPointGroups.flatMap(_.iterator)
|
||||
val trainMatrix = new DMatrix(trainPoints, cacheDirName.map(_ + "/train").orNull)
|
||||
trainMatrix.setGroup(trainGroups.result())
|
||||
trainMatrix.setWeight(trainWeights.result())
|
||||
|
||||
val testMatrix = new DMatrix(testPoints.result().iterator, cacheDirName.map(_ + "/test").orNull)
|
||||
if (trainTestRatio < 1.0) {
|
||||
testMatrix.setGroup(testGroups.result())
|
||||
testMatrix.setWeight(testWeights.result())
|
||||
}
|
||||
|
||||
val trainMargin = fromBaseMarginsToArray(trainBaseMargins.result().iterator)
|
||||
@@ -486,7 +721,7 @@ private object Watches {
|
||||
if (trainMargin.isDefined) trainMatrix.setBaseMargin(trainMargin.get)
|
||||
if (testMargin.isDefined) testMatrix.setBaseMargin(testMargin.get)
|
||||
|
||||
new Watches(trainMatrix, testMatrix, cacheDirName)
|
||||
new Watches(Array(trainMatrix, testMatrix), Array("train", "test"), cacheDirName)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -505,7 +740,7 @@ private[spark] class LabeledPointGroupIterator(base: Iterator[XGBLabeledPoint])
|
||||
private var isNewGroup = false
|
||||
|
||||
override def hasNext: Boolean = {
|
||||
return base.hasNext || isNewGroup
|
||||
base.hasNext || isNewGroup
|
||||
}
|
||||
|
||||
override def next(): XGBLabeledPointGroup = {
|
||||
|
||||
@@ -43,7 +43,7 @@ import org.apache.spark.broadcast.Broadcast
|
||||
|
||||
private[spark] trait XGBoostClassifierParams extends GeneralParams with LearningTaskParams
|
||||
with BoosterParams with HasWeightCol with HasBaseMarginCol with HasNumClass with ParamMapFuncs
|
||||
with HasLeafPredictionCol with HasContribPredictionCol
|
||||
with HasLeafPredictionCol with HasContribPredictionCol with NonParamVariables
|
||||
|
||||
class XGBoostClassifier (
|
||||
override val uid: String,
|
||||
@@ -182,24 +182,19 @@ class XGBoostClassifier (
|
||||
col($(baseMarginCol))
|
||||
}
|
||||
|
||||
val instances: RDD[XGBLabeledPoint] = dataset.select(
|
||||
col($(featuresCol)),
|
||||
col($(labelCol)).cast(FloatType),
|
||||
baseMargin.cast(FloatType),
|
||||
weight.cast(FloatType)
|
||||
).rdd.map { case Row(features: Vector, label: Float, baseMargin: Float, weight: Float) =>
|
||||
val (indices, values) = features match {
|
||||
case v: SparseVector => (v.indices, v.values.map(_.toFloat))
|
||||
case v: DenseVector => (null, v.values.map(_.toFloat))
|
||||
}
|
||||
XGBLabeledPoint(label, indices, values, baseMargin = baseMargin, weight = weight)
|
||||
val trainingSet: RDD[XGBLabeledPoint] = DataUtils.convertDataFrameToXGBLabeledPointRDDs(
|
||||
col($(labelCol)), col($(featuresCol)), weight, baseMargin,
|
||||
None, dataset.asInstanceOf[DataFrame]).head
|
||||
val evalRDDMap = getEvalSets(xgboostParams).map {
|
||||
case (name, dataFrame) => (name,
|
||||
DataUtils.convertDataFrameToXGBLabeledPointRDDs(col($(labelCol)), col($(featuresCol)),
|
||||
weight, baseMargin, None, dataFrame).head)
|
||||
}
|
||||
transformSchema(dataset.schema, logging = true)
|
||||
val derivedXGBParamMap = MLlib2XGBoostParams
|
||||
// All non-null param maps in XGBoostClassifier are in derivedXGBParamMap.
|
||||
val (_booster, _metrics) = XGBoost.trainDistributed(instances, derivedXGBParamMap,
|
||||
$(numRound), $(numWorkers), $(customObj), $(customEval), $(useExternalMemory),
|
||||
$(missing), hasGroup = false)
|
||||
val (_booster, _metrics) = XGBoost.trainDistributed(trainingSet, derivedXGBParamMap,
|
||||
hasGroup = false, evalRDDMap)
|
||||
val model = new XGBoostClassificationModel(uid, _numClasses, _booster)
|
||||
val summary = XGBoostTrainingSummary(_metrics)
|
||||
model.setSummary(summary)
|
||||
@@ -290,12 +285,12 @@ class XGBoostClassificationModel private[ml](
|
||||
val bBooster = dataset.sparkSession.sparkContext.broadcast(_booster)
|
||||
val appName = dataset.sparkSession.sparkContext.appName
|
||||
|
||||
val rdd = dataset.asInstanceOf[Dataset[Row]].rdd.mapPartitions { rowIterator =>
|
||||
val inputRDD = dataset.asInstanceOf[Dataset[Row]].rdd
|
||||
val predictionRDD = dataset.asInstanceOf[Dataset[Row]].rdd.mapPartitions { rowIterator =>
|
||||
if (rowIterator.hasNext) {
|
||||
val rabitEnv = Array("DMLC_TASK_ID" -> TaskContext.getPartitionId().toString).toMap
|
||||
Rabit.init(rabitEnv.asJava)
|
||||
val (rowItr1, rowItr2) = rowIterator.duplicate
|
||||
val featuresIterator = rowItr2.map(row => row.getAs[Vector](
|
||||
val featuresIterator = rowIterator.map(row => row.getAs[Vector](
|
||||
$(featuresCol))).toList.iterator
|
||||
import DataUtils._
|
||||
val cacheInfo = {
|
||||
@@ -312,19 +307,27 @@ class XGBoostClassificationModel private[ml](
|
||||
val Array(rawPredictionItr, probabilityItr, predLeafItr, predContribItr) =
|
||||
producePredictionItrs(bBooster, dm)
|
||||
Rabit.shutdown()
|
||||
produceResultIterator(rowItr1, rawPredictionItr, probabilityItr, predLeafItr,
|
||||
Iterator(rawPredictionItr, probabilityItr, predLeafItr,
|
||||
predContribItr)
|
||||
} finally {
|
||||
dm.delete()
|
||||
}
|
||||
} else {
|
||||
Iterator[Row]()
|
||||
Iterator()
|
||||
}
|
||||
}
|
||||
val resultRDD = inputRDD.zipPartitions(predictionRDD, preservesPartitioning = true) {
|
||||
case (inputIterator, predictionItr) =>
|
||||
if (inputIterator.hasNext) {
|
||||
produceResultIterator(inputIterator, predictionItr.next(), predictionItr.next(),
|
||||
predictionItr.next(), predictionItr.next())
|
||||
} else {
|
||||
Iterator()
|
||||
}
|
||||
}
|
||||
|
||||
bBooster.unpersist(blocking = false)
|
||||
|
||||
dataset.sparkSession.createDataFrame(rdd, generateResultSchema(schema))
|
||||
dataset.sparkSession.createDataFrame(resultRDD, generateResultSchema(schema))
|
||||
}
|
||||
|
||||
private def produceResultIterator(
|
||||
@@ -416,7 +419,9 @@ class XGBoostClassificationModel private[ml](
|
||||
var numColsOutput = 0
|
||||
|
||||
val rawPredictionUDF = udf { rawPrediction: mutable.WrappedArray[Float] =>
|
||||
Vectors.dense(rawPrediction.map(_.toDouble).toArray)
|
||||
val raw = rawPrediction.map(_.toDouble).toArray
|
||||
val rawPredictions = if (numClasses == 2) Array(-raw(0), raw(0)) else raw
|
||||
Vectors.dense(rawPredictions)
|
||||
}
|
||||
|
||||
val probabilityUDF = udf { probability: mutable.WrappedArray[Float] =>
|
||||
|
||||
@@ -43,7 +43,7 @@ import org.apache.spark.broadcast.Broadcast
|
||||
|
||||
private[spark] trait XGBoostRegressorParams extends GeneralParams with BoosterParams
|
||||
with LearningTaskParams with HasBaseMarginCol with HasWeightCol with HasGroupCol
|
||||
with ParamMapFuncs with HasLeafPredictionCol with HasContribPredictionCol
|
||||
with ParamMapFuncs with HasLeafPredictionCol with HasContribPredictionCol with NonParamVariables
|
||||
|
||||
class XGBoostRegressor (
|
||||
override val uid: String,
|
||||
@@ -174,27 +174,19 @@ class XGBoostRegressor (
|
||||
col($(baseMarginCol))
|
||||
}
|
||||
val group = if (!isDefined(groupCol) || $(groupCol).isEmpty) lit(-1) else col($(groupCol))
|
||||
|
||||
val instances: RDD[XGBLabeledPoint] = dataset.select(
|
||||
col($(labelCol)).cast(FloatType),
|
||||
col($(featuresCol)),
|
||||
weight.cast(FloatType),
|
||||
group.cast(IntegerType),
|
||||
baseMargin.cast(FloatType)
|
||||
).rdd.map {
|
||||
case Row(label: Float, features: Vector, weight: Float, group: Int, baseMargin: Float) =>
|
||||
val (indices, values) = features match {
|
||||
case v: SparseVector => (v.indices, v.values.map(_.toFloat))
|
||||
case v: DenseVector => (null, v.values.map(_.toFloat))
|
||||
}
|
||||
XGBLabeledPoint(label, indices, values, weight, group, baseMargin)
|
||||
val trainingSet: RDD[XGBLabeledPoint] = DataUtils.convertDataFrameToXGBLabeledPointRDDs(
|
||||
col($(labelCol)), col($(featuresCol)), weight, baseMargin, Some(group),
|
||||
dataset.asInstanceOf[DataFrame]).head
|
||||
val evalRDDMap = getEvalSets(xgboostParams).map {
|
||||
case (name, dataFrame) => (name,
|
||||
DataUtils.convertDataFrameToXGBLabeledPointRDDs(col($(labelCol)), col($(featuresCol)),
|
||||
weight, baseMargin, Some(group), dataFrame).head)
|
||||
}
|
||||
transformSchema(dataset.schema, logging = true)
|
||||
val derivedXGBParamMap = MLlib2XGBoostParams
|
||||
// All non-null param maps in XGBoostRegressor are in derivedXGBParamMap.
|
||||
val (_booster, _metrics) = XGBoost.trainDistributed(instances, derivedXGBParamMap,
|
||||
$(numRound), $(numWorkers), $(customObj), $(customEval), $(useExternalMemory),
|
||||
$(missing), hasGroup = group != lit(-1))
|
||||
val (_booster, _metrics) = XGBoost.trainDistributed(trainingSet, derivedXGBParamMap,
|
||||
hasGroup = group != lit(-1), evalRDDMap)
|
||||
val model = new XGBoostRegressionModel(uid, _booster)
|
||||
val summary = XGBoostTrainingSummary(_metrics)
|
||||
model.setSummary(summary)
|
||||
@@ -265,13 +257,12 @@ class XGBoostRegressionModel private[ml] (
|
||||
|
||||
val bBooster = dataset.sparkSession.sparkContext.broadcast(_booster)
|
||||
val appName = dataset.sparkSession.sparkContext.appName
|
||||
|
||||
val rdd = dataset.asInstanceOf[Dataset[Row]].rdd.mapPartitions { rowIterator =>
|
||||
val inputRDD = dataset.asInstanceOf[Dataset[Row]].rdd
|
||||
val predictionRDD = dataset.asInstanceOf[Dataset[Row]].rdd.mapPartitions { rowIterator =>
|
||||
if (rowIterator.hasNext) {
|
||||
val rabitEnv = Array("DMLC_TASK_ID" -> TaskContext.getPartitionId().toString).toMap
|
||||
Rabit.init(rabitEnv.asJava)
|
||||
val (rowItr1, rowItr2) = rowIterator.duplicate
|
||||
val featuresIterator = rowItr2.map(row => row.getAs[Vector](
|
||||
val featuresIterator = rowIterator.map(row => row.getAs[Vector](
|
||||
$(featuresCol))).toList.iterator
|
||||
import DataUtils._
|
||||
val cacheInfo = {
|
||||
@@ -281,7 +272,6 @@ class XGBoostRegressionModel private[ml] (
|
||||
null
|
||||
}
|
||||
}
|
||||
|
||||
val dm = new DMatrix(
|
||||
XGBoost.removeMissingValues(featuresIterator.map(_.asXGB), $(missing)),
|
||||
cacheInfo)
|
||||
@@ -289,16 +279,25 @@ class XGBoostRegressionModel private[ml] (
|
||||
val Array(originalPredictionItr, predLeafItr, predContribItr) =
|
||||
producePredictionItrs(bBooster, dm)
|
||||
Rabit.shutdown()
|
||||
produceResultIterator(rowItr1, originalPredictionItr, predLeafItr, predContribItr)
|
||||
Iterator(originalPredictionItr, predLeafItr, predContribItr)
|
||||
} finally {
|
||||
dm.delete()
|
||||
}
|
||||
} else {
|
||||
Iterator[Row]()
|
||||
Iterator()
|
||||
}
|
||||
}
|
||||
val resultRDD = inputRDD.zipPartitions(predictionRDD, preservesPartitioning = true) {
|
||||
case (inputIterator, predictionItr) =>
|
||||
if (inputIterator.hasNext) {
|
||||
produceResultIterator(inputIterator, predictionItr.next(), predictionItr.next(),
|
||||
predictionItr.next())
|
||||
} else {
|
||||
Iterator()
|
||||
}
|
||||
}
|
||||
bBooster.unpersist(blocking = false)
|
||||
dataset.sparkSession.createDataFrame(rdd, generateResultSchema(schema))
|
||||
dataset.sparkSession.createDataFrame(resultRDD, generateResultSchema(schema))
|
||||
}
|
||||
|
||||
private def produceResultIterator(
|
||||
|
||||
@@ -18,12 +18,17 @@ package ml.dmlc.xgboost4j.scala.spark
|
||||
|
||||
class XGBoostTrainingSummary private(
|
||||
val trainObjectiveHistory: Array[Float],
|
||||
val testObjectiveHistory: Option[Array[Float]]
|
||||
) extends Serializable {
|
||||
val validationObjectiveHistory: (String, Array[Float])*) extends Serializable {
|
||||
|
||||
override def toString: String = {
|
||||
val train = trainObjectiveHistory.toList
|
||||
val test = testObjectiveHistory.map(_.toList)
|
||||
s"XGBoostTrainingSummary(trainObjectiveHistory=$train, testObjectiveHistory=$test)"
|
||||
val train = trainObjectiveHistory.mkString(",")
|
||||
val vaidationObjectiveHistoryString = {
|
||||
validationObjectiveHistory.map {
|
||||
case (name, metrics) =>
|
||||
s"${name}ObjectiveHistory=${metrics.mkString(",")}"
|
||||
}.mkString(";")
|
||||
}
|
||||
s"XGBoostTrainingSummary(trainObjectiveHistory=$train; $vaidationObjectiveHistoryString)"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,6 +36,6 @@ private[xgboost4j] object XGBoostTrainingSummary {
|
||||
def apply(metrics: Map[String, Array[Float]]): XGBoostTrainingSummary = {
|
||||
new XGBoostTrainingSummary(
|
||||
trainObjectiveHistory = metrics("train"),
|
||||
testObjectiveHistory = metrics.get("test"))
|
||||
metrics.filter(_._1 != "train").toSeq: _*)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -50,10 +50,21 @@ private[spark] trait BoosterParams extends Params {
|
||||
* overfitting. [default=6] range: [1, Int.MaxValue]
|
||||
*/
|
||||
final val maxDepth = new IntParam(this, "maxDepth", "maximum depth of a tree, increase this " +
|
||||
"value will make model more complex/likely to be overfitting.", (value: Int) => value >= 1)
|
||||
"value will make model more complex/likely to be overfitting.", (value: Int) => value >= 0)
|
||||
|
||||
final def getMaxDepth: Int = $(maxDepth)
|
||||
|
||||
|
||||
/**
|
||||
* Maximum number of nodes to be added. Only relevant when grow_policy=lossguide is set.
|
||||
*/
|
||||
final val maxLeaves = new IntParam(this, "maxLeaves",
|
||||
"Maximum number of nodes to be added. Only relevant when grow_policy=lossguide is set.",
|
||||
(value: Int) => value >= 0)
|
||||
|
||||
final def getMaxLeaves: Int = $(maxLeaves)
|
||||
|
||||
|
||||
/**
|
||||
* minimum sum of instance weight(hessian) needed in a child. If the tree partition step results
|
||||
* in a leaf node with the sum of instance weight less than min_child_weight, then the building
|
||||
@@ -147,7 +158,9 @@ private[spark] trait BoosterParams extends Params {
|
||||
* growth policy for fast histogram algorithm
|
||||
*/
|
||||
final val growPolicy = new Param[String](this, "growPolicy",
|
||||
"growth policy for fast histogram algorithm",
|
||||
"Controls a way new nodes are added to the tree. Currently supported only if" +
|
||||
" tree_method is set to hist. Choices: depthwise, lossguide. depthwise: split at nodes" +
|
||||
" closest to the root. lossguide: split at nodes with highest loss change.",
|
||||
(value: String) => BoosterParams.supportedGrowthPolicies.contains(value))
|
||||
|
||||
final def getGrowPolicy: String = $(growPolicy)
|
||||
@@ -242,6 +255,22 @@ private[spark] trait BoosterParams extends Params {
|
||||
|
||||
final def getTreeLimit: Int = $(treeLimit)
|
||||
|
||||
final val monotoneConstraints = new Param[String](this, name = "monotoneConstraints",
|
||||
doc = "a list in length of number of features, 1 indicate monotonic increasing, - 1 means " +
|
||||
"decreasing, 0 means no constraint. If it is shorter than number of features, 0 will be " +
|
||||
"padded ")
|
||||
|
||||
final def getMonotoneConstraints: String = $(monotoneConstraints)
|
||||
|
||||
final val interactionConstraints = new Param[String](this,
|
||||
name = "interactionConstraints",
|
||||
doc = "Constraints for interaction representing permitted interactions. The constraints" +
|
||||
" must be specified in the form of a nest list, e.g. [[0, 1], [2, 3, 4]]," +
|
||||
" where each inner list is a group of indices of features that are allowed to interact" +
|
||||
" with each other. See tutorial for more information")
|
||||
|
||||
final def getInteractionConstraints: String = $(interactionConstraints)
|
||||
|
||||
setDefault(eta -> 0.3, gamma -> 0, maxDepth -> 6,
|
||||
minChildWeight -> 1, maxDeltaStep -> 0,
|
||||
growPolicy -> "depthwise", maxBins -> 16,
|
||||
|
||||
@@ -22,26 +22,7 @@ import org.json4s.{DefaultFormats, Extraction, NoTypeHints}
|
||||
import org.json4s.jackson.JsonMethods.{compact, parse, render}
|
||||
|
||||
import org.apache.spark.ml.param.{Param, ParamPair, Params}
|
||||
|
||||
class GroupDataParam(
|
||||
parent: Params,
|
||||
name: String,
|
||||
doc: String) extends Param[Seq[Seq[Int]]](parent, name, doc) {
|
||||
|
||||
/** Creates a param pair with the given value (for Java). */
|
||||
override def w(value: Seq[Seq[Int]]): ParamPair[Seq[Seq[Int]]] = super.w(value)
|
||||
|
||||
override def jsonEncode(value: Seq[Seq[Int]]): String = {
|
||||
import org.json4s.jackson.Serialization
|
||||
implicit val formats = Serialization.formats(NoTypeHints)
|
||||
compact(render(Extraction.decompose(value)))
|
||||
}
|
||||
|
||||
override def jsonDecode(json: String): Seq[Seq[Int]] = {
|
||||
implicit val formats = DefaultFormats
|
||||
parse(json).extract[Seq[Seq[Int]]]
|
||||
}
|
||||
}
|
||||
import org.apache.spark.sql.DataFrame
|
||||
|
||||
class CustomEvalParam(
|
||||
parent: Params,
|
||||
|
||||
@@ -22,6 +22,14 @@ import ml.dmlc.xgboost4j.scala.spark.TrackerConf
|
||||
import org.apache.spark.ml.param._
|
||||
import scala.collection.mutable
|
||||
|
||||
import ml.dmlc.xgboost4j.{LabeledPoint => XGBLabeledPoint}
|
||||
|
||||
import org.apache.spark.ml.linalg.{DenseVector, SparseVector, Vector}
|
||||
import org.apache.spark.rdd.RDD
|
||||
import org.apache.spark.sql.{Column, DataFrame, Row}
|
||||
import org.apache.spark.sql.functions.col
|
||||
import org.apache.spark.sql.types.{FloatType, IntegerType}
|
||||
|
||||
private[spark] trait GeneralParams extends Params {
|
||||
|
||||
/**
|
||||
@@ -57,14 +65,27 @@ private[spark] trait GeneralParams extends Params {
|
||||
final def getUseExternalMemory: Boolean = $(useExternalMemory)
|
||||
|
||||
/**
|
||||
* Deprecated. Please use verbosity instead.
|
||||
* 0 means printing running messages, 1 means silent mode. default: 0
|
||||
*/
|
||||
final val silent = new IntParam(this, "silent",
|
||||
"Deprecated. Please use verbosity instead. " +
|
||||
"0 means printing running messages, 1 means silent mode.",
|
||||
(value: Int) => value >= 0 && value <= 1)
|
||||
|
||||
final def getSilent: Int = $(silent)
|
||||
|
||||
/**
|
||||
* Verbosity of printing messages. Valid values are 0 (silent), 1 (warning), 2 (info), 3 (debug).
|
||||
* default: 1
|
||||
*/
|
||||
final val verbosity = new IntParam(this, "verbosity",
|
||||
"Verbosity of printing messages. Valid values are 0 (silent), 1 (warning), 2 (info), " +
|
||||
"3 (debug).",
|
||||
(value: Int) => value >= 0 && value <= 3)
|
||||
|
||||
final def getVerbosity: Int = $(verbosity)
|
||||
|
||||
/**
|
||||
* customized objective function provided by user. default: null
|
||||
*/
|
||||
@@ -151,11 +172,10 @@ private[spark] trait GeneralParams extends Params {
|
||||
final def getSeed: Long = $(seed)
|
||||
|
||||
setDefault(numRound -> 1, numWorkers -> 1, nthread -> 1,
|
||||
useExternalMemory -> false, silent -> 0,
|
||||
useExternalMemory -> false, silent -> 0, verbosity -> 1,
|
||||
customObj -> null, customEval -> null, missing -> Float.NaN,
|
||||
trackerConf -> TrackerConf(), seed -> 0, timeoutRequestWorkers -> 30 * 60 * 1000L,
|
||||
checkpointPath -> "", checkpointInterval -> -1
|
||||
)
|
||||
checkpointPath -> "", checkpointInterval -> -1)
|
||||
}
|
||||
|
||||
trait HasLeafPredictionCol extends Params {
|
||||
@@ -224,10 +244,11 @@ private[spark] trait ParamMapFuncs extends Params {
|
||||
def XGBoostToMLlibParams(xgboostParams: Map[String, Any]): Unit = {
|
||||
for ((paramName, paramValue) <- xgboostParams) {
|
||||
if ((paramName == "booster" && paramValue != "gbtree") ||
|
||||
(paramName == "updater" && paramValue != "grow_histmaker,prune")) {
|
||||
(paramName == "updater" && paramValue != "grow_histmaker,prune" &&
|
||||
paramValue != "hist")) {
|
||||
throw new IllegalArgumentException(s"you specified $paramName as $paramValue," +
|
||||
s" XGBoost-Spark only supports gbtree as booster type" +
|
||||
" and grow_histmaker,prune as the updater type")
|
||||
" and grow_histmaker,prune or hist as the updater type")
|
||||
}
|
||||
val name = CaseFormat.LOWER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, paramName)
|
||||
params.find(_.name == name) match {
|
||||
|
||||
@@ -0,0 +1,36 @@
|
||||
/*
|
||||
Copyright (c) 2014 by Contributors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ml.dmlc.xgboost4j.scala.spark.params
|
||||
|
||||
import org.apache.spark.sql.DataFrame
|
||||
|
||||
trait NonParamVariables {
|
||||
protected var evalSetsMap: Map[String, DataFrame] = Map.empty
|
||||
|
||||
def setEvalSets(evalSets: Map[String, DataFrame]): this.type = {
|
||||
evalSetsMap = evalSets
|
||||
this
|
||||
}
|
||||
|
||||
def getEvalSets(params: Map[String, Any]): Map[String, DataFrame] = {
|
||||
if (params.contains("eval_sets")) {
|
||||
params("eval_sets").asInstanceOf[Map[String, DataFrame]]
|
||||
} else {
|
||||
evalSetsMap
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -20,7 +20,7 @@ import java.net.URL
|
||||
|
||||
import org.apache.commons.logging.LogFactory
|
||||
|
||||
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
|
||||
import org.apache.spark.scheduler._
|
||||
import org.codehaus.jackson.map.ObjectMapper
|
||||
import scala.collection.JavaConverters._
|
||||
import scala.concurrent.ExecutionContext.Implicits.global
|
||||
@@ -98,9 +98,11 @@ class SparkParallelismTracker(
|
||||
*/
|
||||
def execute[T](body: => T): T = {
|
||||
if (timeout <= 0) {
|
||||
logger.info("starting training without setting timeout for waiting for resources")
|
||||
body
|
||||
} else {
|
||||
try {
|
||||
logger.info(s"starting training with timeout set as $timeout ms for waiting for resources")
|
||||
waitForCondition(numAliveCores >= requestedCores, timeout)
|
||||
} catch {
|
||||
case _: TimeoutException =>
|
||||
@@ -112,16 +114,26 @@ class SparkParallelismTracker(
|
||||
}
|
||||
}
|
||||
|
||||
private class ErrorInXGBoostTraining(msg: String) extends ControlThrowable {
|
||||
override def toString: String = s"ErrorInXGBoostTraining: $msg"
|
||||
}
|
||||
|
||||
private[spark] class TaskFailedListener extends SparkListener {
|
||||
|
||||
private[this] val logger = LogFactory.getLog("XGBoostTaskFailedListener")
|
||||
|
||||
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
|
||||
taskEnd.reason match {
|
||||
case reason: TaskFailedReason =>
|
||||
throw new ErrorInXGBoostTraining(s"ExecutorLost during XGBoost Training: " +
|
||||
s"${reason.toErrorString}")
|
||||
case taskEndReason: TaskFailedReason =>
|
||||
logger.error(s"Training Task Failed during XGBoost Training: " +
|
||||
s"$taskEndReason, stopping SparkContext")
|
||||
// Spark does not allow ListenerThread to shutdown SparkContext so that we have to do it
|
||||
// in a separate thread
|
||||
val sparkContextKiller = new Thread() {
|
||||
override def run(): Unit = {
|
||||
LiveListenerBus.withinListenerThread.withValue(false) {
|
||||
SparkContext.getOrCreate().stop()
|
||||
}
|
||||
}
|
||||
}
|
||||
sparkContextKiller.setDaemon(true)
|
||||
sparkContextKiller.start()
|
||||
case _ =>
|
||||
}
|
||||
}
|
||||
|
||||
@@ -98,3 +98,12 @@ object Ranking extends TrainTestData {
|
||||
getResourceLines(resource).map(_.toInt).toList
|
||||
}
|
||||
}
|
||||
|
||||
object Synthetic extends {
|
||||
val train: Seq[XGBLabeledPoint] = Seq(
|
||||
XGBLabeledPoint(1.0f, Array(0, 1), Array(1.0f, 2.0f)),
|
||||
XGBLabeledPoint(0.0f, Array(0, 1, 2), Array(1.0f, 2.0f, 3.0f)),
|
||||
XGBLabeledPoint(0.0f, Array(0, 1, 2), Array(1.0f, 2.0f, 3.0f)),
|
||||
XGBLabeledPoint(1.0f, Array(0, 1), Array(1.0f, 2.0f))
|
||||
)
|
||||
}
|
||||
|
||||
@@ -17,11 +17,14 @@
|
||||
package ml.dmlc.xgboost4j.scala.spark
|
||||
|
||||
import ml.dmlc.xgboost4j.scala.{DMatrix, XGBoost => ScalaXGBoost}
|
||||
|
||||
import org.apache.spark.ml.linalg._
|
||||
import org.apache.spark.ml.param.ParamMap
|
||||
import org.apache.spark.sql._
|
||||
import org.scalatest.FunSuite
|
||||
|
||||
import org.apache.spark.Partitioner
|
||||
|
||||
class XGBoostClassifierSuite extends FunSuite with PerTest {
|
||||
|
||||
test("XGBoost-Spark XGBoostClassifier ouput should match XGBoost4j") {
|
||||
@@ -60,10 +63,11 @@ class XGBoostClassifierSuite extends FunSuite with PerTest {
|
||||
collect().map(row => (row.getAs[Int]("id"), row.getAs[DenseVector]("rawPrediction"))).toMap
|
||||
|
||||
assert(testDF.count() === prediction4.size)
|
||||
// the vector length in rawPrediction column is 2 since we have to fit to the evaluator in Spark
|
||||
for (i <- prediction3.indices) {
|
||||
assert(prediction3(i).length === prediction4(i).values.length)
|
||||
assert(prediction3(i).length === prediction4(i).values.length - 1)
|
||||
for (j <- prediction3(i).indices) {
|
||||
assert(prediction3(i)(j) === prediction4(i)(j))
|
||||
assert(prediction3(i)(j) === prediction4(i)(j + 1))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -136,7 +140,7 @@ class XGBoostClassifierSuite extends FunSuite with PerTest {
|
||||
assert(predictionDF.columns.contains("final_prediction") === false)
|
||||
|
||||
assert(model.summary.trainObjectiveHistory.length === 5)
|
||||
assert(model.summary.testObjectiveHistory.isEmpty)
|
||||
assert(model.summary.validationObjectiveHistory.isEmpty)
|
||||
}
|
||||
|
||||
test("XGBoost and Spark parameters synchronize correctly") {
|
||||
@@ -190,31 +194,6 @@ class XGBoostClassifierSuite extends FunSuite with PerTest {
|
||||
assert(count != 0)
|
||||
}
|
||||
|
||||
test("training summary") {
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
"objective" -> "binary:logistic", "num_round" -> 5, "nWorkers" -> numWorkers)
|
||||
|
||||
val trainingDF = buildDataFrame(Classification.train)
|
||||
val xgb = new XGBoostClassifier(paramMap)
|
||||
val model = xgb.fit(trainingDF)
|
||||
|
||||
assert(model.summary.trainObjectiveHistory.length === 5)
|
||||
assert(model.summary.testObjectiveHistory.isEmpty)
|
||||
}
|
||||
|
||||
test("train/test split") {
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
"objective" -> "binary:logistic", "train_test_ratio" -> "0.5",
|
||||
"num_round" -> 5, "num_workers" -> numWorkers)
|
||||
val training = buildDataFrame(Classification.train)
|
||||
|
||||
val xgb = new XGBoostClassifier(paramMap)
|
||||
val model = xgb.fit(training)
|
||||
val Some(testObjectiveHistory) = model.summary.testObjectiveHistory
|
||||
assert(testObjectiveHistory.length === 5)
|
||||
assert(model.summary.trainObjectiveHistory !== testObjectiveHistory)
|
||||
}
|
||||
|
||||
test("test predictionLeaf") {
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
"objective" -> "binary:logistic", "train_test_ratio" -> "0.5",
|
||||
@@ -287,4 +266,46 @@ class XGBoostClassifierSuite extends FunSuite with PerTest {
|
||||
assert(resultDF.columns.contains("predictLeaf"))
|
||||
assert(resultDF.columns.contains("predictContrib"))
|
||||
}
|
||||
|
||||
test("infrequent features") {
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
"objective" -> "binary:logistic",
|
||||
"num_round" -> 5, "num_workers" -> 2)
|
||||
import DataUtils._
|
||||
val sparkSession = SparkSession.builder().getOrCreate()
|
||||
import sparkSession.implicits._
|
||||
val repartitioned = sc.parallelize(Synthetic.train, 3).map(lp => (lp.label, lp)).partitionBy(
|
||||
new Partitioner {
|
||||
override def numPartitions: Int = 2
|
||||
|
||||
override def getPartition(key: Any): Int = key.asInstanceOf[Float].toInt
|
||||
}
|
||||
).map(_._2).zipWithIndex().map {
|
||||
case (lp, id) =>
|
||||
(id, lp.label, lp.features)
|
||||
}.toDF("id", "label", "features")
|
||||
val xgb = new XGBoostClassifier(paramMap)
|
||||
xgb.fit(repartitioned)
|
||||
}
|
||||
|
||||
test("infrequent features (use_external_memory)") {
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
"objective" -> "binary:logistic",
|
||||
"num_round" -> 5, "num_workers" -> 2, "use_external_memory" -> true)
|
||||
import DataUtils._
|
||||
val sparkSession = SparkSession.builder().getOrCreate()
|
||||
import sparkSession.implicits._
|
||||
val repartitioned = sc.parallelize(Synthetic.train, 3).map(lp => (lp.label, lp)).partitionBy(
|
||||
new Partitioner {
|
||||
override def numPartitions: Int = 2
|
||||
|
||||
override def getPartition(key: Any): Int = key.asInstanceOf[Float].toInt
|
||||
}
|
||||
).map(_._2).zipWithIndex().map {
|
||||
case (lp, id) =>
|
||||
(id, lp.label, lp.features)
|
||||
}.toDF("id", "label", "features")
|
||||
val xgb = new XGBoostClassifier(paramMap)
|
||||
xgb.fit(repartitioned)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,18 +18,21 @@ package ml.dmlc.xgboost4j.scala.spark
|
||||
|
||||
import java.nio.file.Files
|
||||
import java.util.concurrent.LinkedBlockingDeque
|
||||
import ml.dmlc.xgboost4j.java.Rabit
|
||||
|
||||
import ml.dmlc.xgboost4j.{LabeledPoint => XGBLabeledPoint}
|
||||
import ml.dmlc.xgboost4j.scala.DMatrix
|
||||
import ml.dmlc.xgboost4j.scala.rabit.RabitTracker
|
||||
import ml.dmlc.xgboost4j.scala.{XGBoost => SXGBoost, _}
|
||||
import org.apache.hadoop.fs.{FileSystem, Path}
|
||||
|
||||
import org.apache.spark.TaskContext
|
||||
import org.apache.spark.ml.linalg.Vectors
|
||||
import org.apache.spark.sql._
|
||||
import org.scalatest.FunSuite
|
||||
import scala.util.Random
|
||||
|
||||
import ml.dmlc.xgboost4j.java.Rabit
|
||||
|
||||
class XGBoostGeneralSuite extends FunSuite with PerTest {
|
||||
|
||||
test("test Rabit allreduce to validate Scala-implemented Rabit tracker") {
|
||||
@@ -77,11 +80,11 @@ class XGBoostGeneralSuite extends FunSuite with PerTest {
|
||||
val trainingRDD = sc.parallelize(Classification.train)
|
||||
val (booster, metrics) = XGBoost.trainDistributed(
|
||||
trainingRDD,
|
||||
List("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
"objective" -> "binary:logistic").toMap,
|
||||
round = 5, nWorkers = numWorkers, eval = null, obj = null, useExternalMemory = false,
|
||||
hasGroup = false, missing = Float.NaN)
|
||||
|
||||
List("eta" -> "1", "max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "num_round" -> 5, "num_workers" -> numWorkers,
|
||||
"custom_eval" -> null, "custom_obj" -> null, "use_external_memory" -> false,
|
||||
"missing" -> Float.NaN).toMap,
|
||||
hasGroup = false)
|
||||
assert(booster != null)
|
||||
}
|
||||
|
||||
@@ -89,7 +92,7 @@ class XGBoostGeneralSuite extends FunSuite with PerTest {
|
||||
val eval = new EvalError()
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "num_round" -> 5, "num_workers" -> numWorkers,
|
||||
"use_external_memory" -> true)
|
||||
val model = new XGBoostClassifier(paramMap).fit(training)
|
||||
@@ -101,73 +104,120 @@ class XGBoostGeneralSuite extends FunSuite with PerTest {
|
||||
val eval = new EvalError()
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "num_round" -> 5, "num_workers" -> numWorkers,
|
||||
"tracker_conf" -> TrackerConf(60 * 60 * 1000, "scala"))
|
||||
val model = new XGBoostClassifier(paramMap).fit(training)
|
||||
assert(eval.eval(model._booster.predict(testDM, outPutMargin = true), testDM) < 0.1)
|
||||
}
|
||||
|
||||
|
||||
ignore("test with fast histo depthwise") {
|
||||
test("test with quantile hist with monotone_constraints (lossguide)") {
|
||||
val eval = new EvalError()
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
val paramMap = Map("eta" -> "1", "gamma" -> "0.5", "max_depth" -> "6", "silent" -> "1",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist", "grow_policy" -> "depthwise",
|
||||
"eval_metric" -> "error", "num_round" -> 5, "num_workers" -> math.min(numWorkers, 2))
|
||||
// TODO: histogram algorithm seems to be very very sensitive to worker number
|
||||
val paramMap = Map("eta" -> "1",
|
||||
"max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist", "grow_policy" -> "lossguide",
|
||||
"num_round" -> 5, "num_workers" -> numWorkers, "monotone_constraints" -> "(1, 0)")
|
||||
val model = new XGBoostClassifier(paramMap).fit(training)
|
||||
assert(eval.eval(model._booster.predict(testDM, outPutMargin = true), testDM) < 0.1)
|
||||
}
|
||||
|
||||
ignore("test with fast histo lossguide") {
|
||||
test("test with quantile hist with interaction_constraints (lossguide)") {
|
||||
val eval = new EvalError()
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
val paramMap = Map("eta" -> "1", "gamma" -> "0.5", "max_depth" -> "0", "silent" -> "1",
|
||||
val paramMap = Map("eta" -> "1",
|
||||
"max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist", "grow_policy" -> "lossguide",
|
||||
"max_leaves" -> "8", "eval_metric" -> "error", "num_round" -> 5,
|
||||
"num_workers" -> math.min(numWorkers, 2))
|
||||
"num_round" -> 5, "num_workers" -> numWorkers, "interaction_constraints" -> "[[1,2],[2,3,4]]")
|
||||
val model = new XGBoostClassifier(paramMap).fit(training)
|
||||
assert(eval.eval(model._booster.predict(testDM, outPutMargin = true), testDM) < 0.1)
|
||||
}
|
||||
|
||||
test("test with quantile hist with monotone_constraints (depthwise)") {
|
||||
val eval = new EvalError()
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
val paramMap = Map("eta" -> "1",
|
||||
"max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist", "grow_policy" -> "depthwise",
|
||||
"num_round" -> 5, "num_workers" -> numWorkers, "monotone_constraints" -> "(1, 0)")
|
||||
val model = new XGBoostClassifier(paramMap).fit(training)
|
||||
assert(eval.eval(model._booster.predict(testDM, outPutMargin = true), testDM) < 0.1)
|
||||
}
|
||||
|
||||
test("test with quantile hist with interaction_constraints (depthwise)") {
|
||||
val eval = new EvalError()
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
val paramMap = Map("eta" -> "1",
|
||||
"max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist", "grow_policy" -> "depthwise",
|
||||
"num_round" -> 5, "num_workers" -> numWorkers, "interaction_constraints" -> "[[1,2],[2,3,4]]")
|
||||
val model = new XGBoostClassifier(paramMap).fit(training)
|
||||
assert(eval.eval(model._booster.predict(testDM, outPutMargin = true), testDM) < 0.1)
|
||||
}
|
||||
|
||||
test("test with quantile hist depthwise") {
|
||||
val eval = new EvalError()
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
val paramMap = Map("eta" -> "1",
|
||||
"max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist", "grow_policy" -> "depthwise",
|
||||
"num_round" -> 5, "num_workers" -> numWorkers)
|
||||
val model = new XGBoostClassifier(paramMap).fit(training)
|
||||
assert(eval.eval(model._booster.predict(testDM, outPutMargin = true), testDM) < 0.1)
|
||||
}
|
||||
|
||||
test("test with quantile hist lossguide") {
|
||||
val eval = new EvalError()
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
val paramMap = Map("eta" -> "1", "gamma" -> "0.5", "max_depth" -> "0",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist", "grow_policy" -> "lossguide",
|
||||
"max_leaves" -> "8", "num_round" -> 5,
|
||||
"num_workers" -> numWorkers)
|
||||
val model = new XGBoostClassifier(paramMap).fit(training)
|
||||
val x = eval.eval(model._booster.predict(testDM, outPutMargin = true), testDM)
|
||||
assert(x < 0.1)
|
||||
}
|
||||
|
||||
ignore("test with fast histo lossguide with max bin") {
|
||||
test("test with quantile hist lossguide with max bin") {
|
||||
val eval = new EvalError()
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
val paramMap = Map("eta" -> "1", "gamma" -> "0.5", "max_depth" -> "0", "silent" -> "0",
|
||||
val paramMap = Map("eta" -> "1", "gamma" -> "0.5", "max_depth" -> "0",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist",
|
||||
"grow_policy" -> "lossguide", "max_leaves" -> "8", "max_bin" -> "16",
|
||||
"eval_metric" -> "error", "num_round" -> 5, "num_workers" -> math.min(numWorkers, 2))
|
||||
"eval_metric" -> "error", "num_round" -> 5, "num_workers" -> numWorkers)
|
||||
val model = new XGBoostClassifier(paramMap).fit(training)
|
||||
val x = eval.eval(model._booster.predict(testDM, outPutMargin = true), testDM)
|
||||
assert(x < 0.1)
|
||||
}
|
||||
|
||||
ignore("test with fast histo depthwidth with max depth") {
|
||||
test("test with quantile hist depthwidth with max depth") {
|
||||
val eval = new EvalError()
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
val paramMap = Map("eta" -> "1", "gamma" -> "0.5", "max_depth" -> "0", "silent" -> "0",
|
||||
val paramMap = Map("eta" -> "1", "gamma" -> "0.5", "max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist",
|
||||
"grow_policy" -> "depthwise", "max_leaves" -> "8", "max_depth" -> "2",
|
||||
"eval_metric" -> "error", "num_round" -> 10, "num_workers" -> math.min(numWorkers, 2))
|
||||
"grow_policy" -> "depthwise", "max_depth" -> "2",
|
||||
"eval_metric" -> "error", "num_round" -> 10, "num_workers" -> numWorkers)
|
||||
val model = new XGBoostClassifier(paramMap).fit(training)
|
||||
val x = eval.eval(model._booster.predict(testDM, outPutMargin = true), testDM)
|
||||
assert(x < 0.1)
|
||||
}
|
||||
|
||||
ignore("test with fast histo depthwidth with max depth and max bin") {
|
||||
test("test with quantile hist depthwidth with max depth and max bin") {
|
||||
val eval = new EvalError()
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
val paramMap = Map("eta" -> "1", "gamma" -> "0.5", "max_depth" -> "0", "silent" -> "0",
|
||||
val paramMap = Map("eta" -> "1", "gamma" -> "0.5", "max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist",
|
||||
"grow_policy" -> "depthwise", "max_depth" -> "2", "max_bin" -> "2",
|
||||
"eval_metric" -> "error", "num_round" -> 10, "num_workers" -> math.min(numWorkers, 2))
|
||||
"eval_metric" -> "error", "num_round" -> 10, "num_workers" -> numWorkers)
|
||||
val model = new XGBoostClassifier(paramMap).fit(training)
|
||||
val x = eval.eval(model._booster.predict(testDM, outPutMargin = true), testDM)
|
||||
assert(x < 0.1)
|
||||
@@ -191,7 +241,7 @@ class XGBoostGeneralSuite extends FunSuite with PerTest {
|
||||
}
|
||||
|
||||
val denseDF = buildDenseDataFrame().repartition(4)
|
||||
val paramMap = List("eta" -> "1", "max_depth" -> "2", "silent" -> "1",
|
||||
val paramMap = List("eta" -> "1", "max_depth" -> "2",
|
||||
"objective" -> "binary:logistic", "missing" -> -0.1f, "num_workers" -> numWorkers).toMap
|
||||
val model = new XGBoostClassifier(paramMap).fit(denseDF)
|
||||
model.transform(denseDF).collect()
|
||||
@@ -201,7 +251,7 @@ class XGBoostGeneralSuite extends FunSuite with PerTest {
|
||||
val eval = new EvalError()
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "timeout_request_workers" -> 0L,
|
||||
"num_round" -> 5, "num_workers" -> numWorkers)
|
||||
val model = new XGBoostClassifier(paramMap).fit(training)
|
||||
@@ -215,7 +265,7 @@ class XGBoostGeneralSuite extends FunSuite with PerTest {
|
||||
val testDM = new DMatrix(Classification.test.iterator)
|
||||
|
||||
val tmpPath = Files.createTempDirectory("model1").toAbsolutePath.toString
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> 2, "silent" -> "1",
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> 2,
|
||||
"objective" -> "binary:logistic", "checkpoint_path" -> tmpPath,
|
||||
"checkpoint_interval" -> 2, "num_workers" -> numWorkers)
|
||||
|
||||
@@ -267,13 +317,101 @@ class XGBoostGeneralSuite extends FunSuite with PerTest {
|
||||
|
||||
test("distributed training with group data") {
|
||||
val trainingRDD = sc.parallelize(Ranking.train, 5)
|
||||
val (booster, metrics) = XGBoost.trainDistributed(
|
||||
val (booster, _) = XGBoost.trainDistributed(
|
||||
trainingRDD,
|
||||
List("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
"objective" -> "binary:logistic").toMap,
|
||||
round = 5, nWorkers = numWorkers, eval = null, obj = null, useExternalMemory = false,
|
||||
hasGroup = true, missing = Float.NaN)
|
||||
List("eta" -> "1", "max_depth" -> "6",
|
||||
"objective" -> "rank:pairwise", "num_round" -> 5, "num_workers" -> numWorkers,
|
||||
"custom_eval" -> null, "custom_obj" -> null, "use_external_memory" -> false,
|
||||
"missing" -> Float.NaN).toMap,
|
||||
hasGroup = true)
|
||||
|
||||
assert(booster != null)
|
||||
}
|
||||
|
||||
test("training summary") {
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "num_round" -> 5, "nWorkers" -> numWorkers)
|
||||
|
||||
val trainingDF = buildDataFrame(Classification.train)
|
||||
val xgb = new XGBoostClassifier(paramMap)
|
||||
val model = xgb.fit(trainingDF)
|
||||
|
||||
assert(model.summary.trainObjectiveHistory.length === 5)
|
||||
assert(model.summary.validationObjectiveHistory.isEmpty)
|
||||
}
|
||||
|
||||
test("train/test split") {
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6",
|
||||
"objective" -> "binary:logistic", "train_test_ratio" -> "0.5",
|
||||
"num_round" -> 5, "num_workers" -> numWorkers)
|
||||
val training = buildDataFrame(Classification.train)
|
||||
|
||||
val xgb = new XGBoostClassifier(paramMap)
|
||||
val model = xgb.fit(training)
|
||||
assert(model.summary.validationObjectiveHistory.length === 1)
|
||||
assert(model.summary.validationObjectiveHistory(0)._1 === "test")
|
||||
assert(model.summary.validationObjectiveHistory(0)._2.length === 5)
|
||||
assert(model.summary.trainObjectiveHistory !== model.summary.validationObjectiveHistory(0))
|
||||
}
|
||||
|
||||
test("train with multiple validation datasets (non-ranking)") {
|
||||
val training = buildDataFrame(Classification.train)
|
||||
val Array(train, eval1, eval2) = training.randomSplit(Array(0.6, 0.2, 0.2))
|
||||
val paramMap1 = Map("eta" -> "1", "max_depth" -> "6",
|
||||
"objective" -> "binary:logistic",
|
||||
"num_round" -> 5, "num_workers" -> numWorkers)
|
||||
|
||||
val xgb1 = new XGBoostClassifier(paramMap1).setEvalSets(Map("eval1" -> eval1, "eval2" -> eval2))
|
||||
val model1 = xgb1.fit(train)
|
||||
assert(model1.summary.validationObjectiveHistory.length === 2)
|
||||
assert(model1.summary.validationObjectiveHistory.map(_._1).toSet === Set("eval1", "eval2"))
|
||||
assert(model1.summary.validationObjectiveHistory(0)._2.length === 5)
|
||||
assert(model1.summary.validationObjectiveHistory(1)._2.length === 5)
|
||||
assert(model1.summary.trainObjectiveHistory !== model1.summary.validationObjectiveHistory(0))
|
||||
assert(model1.summary.trainObjectiveHistory !== model1.summary.validationObjectiveHistory(1))
|
||||
|
||||
val paramMap2 = Map("eta" -> "1", "max_depth" -> "6",
|
||||
"objective" -> "binary:logistic",
|
||||
"num_round" -> 5, "num_workers" -> numWorkers,
|
||||
"eval_sets" -> Map("eval1" -> eval1, "eval2" -> eval2))
|
||||
val xgb2 = new XGBoostClassifier(paramMap2)
|
||||
val model2 = xgb2.fit(train)
|
||||
assert(model2.summary.validationObjectiveHistory.length === 2)
|
||||
assert(model2.summary.validationObjectiveHistory.map(_._1).toSet === Set("eval1", "eval2"))
|
||||
assert(model2.summary.validationObjectiveHistory(0)._2.length === 5)
|
||||
assert(model2.summary.validationObjectiveHistory(1)._2.length === 5)
|
||||
assert(model2.summary.trainObjectiveHistory !== model2.summary.validationObjectiveHistory(0))
|
||||
assert(model2.summary.trainObjectiveHistory !== model2.summary.validationObjectiveHistory(1))
|
||||
}
|
||||
|
||||
test("train with multiple validation datasets (ranking)") {
|
||||
val training = buildDataFrameWithGroup(Ranking.train, 5)
|
||||
val Array(train, eval1, eval2) = training.randomSplit(Array(0.6, 0.2, 0.2))
|
||||
val paramMap1 = Map("eta" -> "1", "max_depth" -> "6",
|
||||
"objective" -> "rank:pairwise",
|
||||
"num_round" -> 5, "num_workers" -> numWorkers, "group_col" -> "group")
|
||||
val xgb1 = new XGBoostRegressor(paramMap1).setEvalSets(Map("eval1" -> eval1, "eval2" -> eval2))
|
||||
val model1 = xgb1.fit(train)
|
||||
assert(model1 != null)
|
||||
assert(model1.summary.validationObjectiveHistory.length === 2)
|
||||
assert(model1.summary.validationObjectiveHistory.map(_._1).toSet === Set("eval1", "eval2"))
|
||||
assert(model1.summary.validationObjectiveHistory(0)._2.length === 5)
|
||||
assert(model1.summary.validationObjectiveHistory(1)._2.length === 5)
|
||||
assert(model1.summary.trainObjectiveHistory !== model1.summary.validationObjectiveHistory(0))
|
||||
assert(model1.summary.trainObjectiveHistory !== model1.summary.validationObjectiveHistory(1))
|
||||
|
||||
val paramMap2 = Map("eta" -> "1", "max_depth" -> "6",
|
||||
"objective" -> "rank:pairwise",
|
||||
"num_round" -> 5, "num_workers" -> numWorkers, "group_col" -> "group",
|
||||
"eval_sets" -> Map("eval1" -> eval1, "eval2" -> eval2))
|
||||
val xgb2 = new XGBoostRegressor(paramMap2)
|
||||
val model2 = xgb2.fit(train)
|
||||
assert(model2 != null)
|
||||
assert(model2.summary.validationObjectiveHistory.length === 2)
|
||||
assert(model2.summary.validationObjectiveHistory.map(_._1).toSet === Set("eval1", "eval2"))
|
||||
assert(model2.summary.validationObjectiveHistory(0)._2.length === 5)
|
||||
assert(model2.summary.validationObjectiveHistory(1)._2.length === 5)
|
||||
assert(model2.summary.trainObjectiveHistory !== model2.summary.validationObjectiveHistory(0))
|
||||
assert(model2.summary.trainObjectiveHistory !== model2.summary.validationObjectiveHistory(1))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,10 +6,10 @@
|
||||
<parent>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost-jvm</artifactId>
|
||||
<version>0.81</version>
|
||||
<version>0.82-SNAPSHOT</version>
|
||||
</parent>
|
||||
<artifactId>xgboost4j</artifactId>
|
||||
<version>0.81</version>
|
||||
<version>0.82-SNAPSHOT</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<dependencies>
|
||||
|
||||
@@ -16,9 +16,12 @@
|
||||
package ml.dmlc.xgboost4j.java;
|
||||
|
||||
import java.io.*;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import com.esotericsoftware.kryo.Kryo;
|
||||
import com.esotericsoftware.kryo.KryoSerializable;
|
||||
@@ -395,6 +398,25 @@ public class Booster implements Serializable, KryoSerializable {
|
||||
return modelInfos[0];
|
||||
}
|
||||
|
||||
/**
|
||||
* Supported feature importance types
|
||||
*
|
||||
* WEIGHT = Number of nodes that a feature was used to determine a split
|
||||
* GAIN = Average information gain per split for a feature
|
||||
* COVER = Average cover per split for a feature
|
||||
* TOTAL_GAIN = Total information gain over all splits of a feature
|
||||
* TOTAL_COVER = Total cover over all splits of a feature
|
||||
*/
|
||||
public static class FeatureImportanceType {
|
||||
public static final String WEIGHT = "weight";
|
||||
public static final String GAIN = "gain";
|
||||
public static final String COVER = "cover";
|
||||
public static final String TOTAL_GAIN = "total_gain";
|
||||
public static final String TOTAL_COVER = "total_cover";
|
||||
public static final Set<String> ACCEPTED_TYPES = new HashSet<>(
|
||||
Arrays.asList(WEIGHT, GAIN, COVER, TOTAL_GAIN, TOTAL_COVER));
|
||||
}
|
||||
|
||||
/**
|
||||
* Get importance of each feature with specified feature names.
|
||||
*
|
||||
@@ -403,6 +425,28 @@ public class Booster implements Serializable, KryoSerializable {
|
||||
*/
|
||||
public Map<String, Integer> getFeatureScore(String[] featureNames) throws XGBoostError {
|
||||
String[] modelInfos = getModelDump(featureNames, false);
|
||||
return getFeatureWeightsFromModel(modelInfos);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get importance of each feature
|
||||
*
|
||||
* @return featureScoreMap key: feature index, value: feature importance score, can be nill
|
||||
* @throws XGBoostError native error
|
||||
*/
|
||||
public Map<String, Integer> getFeatureScore(String featureMap) throws XGBoostError {
|
||||
String[] modelInfos = getModelDump(featureMap, false);
|
||||
return getFeatureWeightsFromModel(modelInfos);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the importance of each feature based purely on weights (number of splits)
|
||||
*
|
||||
* @return featureScoreMap key: feature index,
|
||||
* value: feature importance score based on weight
|
||||
* @throws XGBoostError native error
|
||||
*/
|
||||
private Map<String, Integer> getFeatureWeightsFromModel(String[] modelInfos) throws XGBoostError {
|
||||
Map<String, Integer> featureScore = new HashMap<>();
|
||||
for (String tree : modelInfos) {
|
||||
for (String node : tree.split("\n")) {
|
||||
@@ -423,30 +467,91 @@ public class Booster implements Serializable, KryoSerializable {
|
||||
}
|
||||
|
||||
/**
|
||||
* Get importance of each feature
|
||||
* Get the feature importances for gain or cover (average or total)
|
||||
*
|
||||
* @return featureScoreMap key: feature index, value: feature importance score, can be nill
|
||||
* @return featureImportanceMap key: feature index,
|
||||
* values: feature importance score based on gain or cover
|
||||
* @throws XGBoostError native error
|
||||
*/
|
||||
public Map<String, Integer> getFeatureScore(String featureMap) throws XGBoostError {
|
||||
String[] modelInfos = getModelDump(featureMap, false);
|
||||
Map<String, Integer> featureScore = new HashMap<>();
|
||||
for (String tree : modelInfos) {
|
||||
for (String node : tree.split("\n")) {
|
||||
public Map<String, Double> getScore(
|
||||
String[] featureNames, String importanceType) throws XGBoostError {
|
||||
String[] modelInfos = getModelDump(featureNames, true);
|
||||
return getFeatureImportanceFromModel(modelInfos, importanceType);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the feature importances for gain or cover (average or total), with feature names
|
||||
*
|
||||
* @return featureImportanceMap key: feature name,
|
||||
* values: feature importance score based on gain or cover
|
||||
* @throws XGBoostError native error
|
||||
*/
|
||||
public Map<String, Double> getScore(
|
||||
String featureMap, String importanceType) throws XGBoostError {
|
||||
String[] modelInfos = getModelDump(featureMap, true);
|
||||
return getFeatureImportanceFromModel(modelInfos, importanceType);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the importance of each feature based on information gain or cover
|
||||
*
|
||||
* @return featureImportanceMap key: feature index, value: feature importance score
|
||||
* based on information gain or cover
|
||||
* @throws XGBoostError native error
|
||||
*/
|
||||
private Map<String, Double> getFeatureImportanceFromModel(
|
||||
String[] modelInfos, String importanceType) throws XGBoostError {
|
||||
if (!FeatureImportanceType.ACCEPTED_TYPES.contains(importanceType)) {
|
||||
throw new AssertionError(String.format("Importance type %s is not supported",
|
||||
importanceType));
|
||||
}
|
||||
Map<String, Double> importanceMap = new HashMap<>();
|
||||
Map<String, Double> weightMap = new HashMap<>();
|
||||
if (importanceType == FeatureImportanceType.WEIGHT) {
|
||||
Map<String, Integer> importanceWeights = getFeatureWeightsFromModel(modelInfos);
|
||||
for (String feature: importanceWeights.keySet()) {
|
||||
importanceMap.put(feature, new Double(importanceWeights.get(feature)));
|
||||
}
|
||||
return importanceMap;
|
||||
}
|
||||
/* Each split in the tree has this text form:
|
||||
"0:[f28<-9.53674316e-07] yes=1,no=2,missing=1,gain=4000.53101,cover=1628.25"
|
||||
So the line has to be split according to whether cover or gain is desired */
|
||||
String splitter = "gain=";
|
||||
if (importanceType == FeatureImportanceType.COVER
|
||||
|| importanceType == FeatureImportanceType.TOTAL_COVER) {
|
||||
splitter = "cover=";
|
||||
}
|
||||
for (String tree: modelInfos) {
|
||||
for (String node: tree.split("\n")) {
|
||||
String[] array = node.split("\\[");
|
||||
if (array.length == 1) {
|
||||
continue;
|
||||
}
|
||||
String fid = array[1].split("\\]")[0];
|
||||
fid = fid.split("<")[0];
|
||||
if (featureScore.containsKey(fid)) {
|
||||
featureScore.put(fid, 1 + featureScore.get(fid));
|
||||
String[] fidWithImportance = array[1].split("\\]");
|
||||
// Extract gain or cover from string after closing bracket
|
||||
Double importance = Double.parseDouble(
|
||||
fidWithImportance[1].split(splitter)[1].split(",")[0]
|
||||
);
|
||||
String fid = fidWithImportance[0].split("<")[0];
|
||||
if (importanceMap.containsKey(fid)) {
|
||||
importanceMap.put(fid, importance + importanceMap.get(fid));
|
||||
weightMap.put(fid, 1d + weightMap.get(fid));
|
||||
} else {
|
||||
featureScore.put(fid, 1);
|
||||
importanceMap.put(fid, importance);
|
||||
weightMap.put(fid, 1d);
|
||||
}
|
||||
}
|
||||
}
|
||||
return featureScore;
|
||||
/* By default we calculate total gain and total cover.
|
||||
Divide by the number of nodes per feature to get gain / cover */
|
||||
if (importanceType == FeatureImportanceType.COVER
|
||||
|| importanceType == FeatureImportanceType.GAIN) {
|
||||
for (String fid: importanceMap.keySet()) {
|
||||
importanceMap.put(fid, importanceMap.get(fid)/weightMap.get(fid));
|
||||
}
|
||||
}
|
||||
return importanceMap;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -140,6 +140,8 @@ public class XGBoost {
|
||||
//collect eval matrixs
|
||||
String[] evalNames;
|
||||
DMatrix[] evalMats;
|
||||
float bestScore;
|
||||
int bestIteration;
|
||||
List<String> names = new ArrayList<String>();
|
||||
List<DMatrix> mats = new ArrayList<DMatrix>();
|
||||
|
||||
@@ -150,6 +152,12 @@ public class XGBoost {
|
||||
|
||||
evalNames = names.toArray(new String[names.size()]);
|
||||
evalMats = mats.toArray(new DMatrix[mats.size()]);
|
||||
if (isMaximizeEvaluation(params)) {
|
||||
bestScore = -Float.MAX_VALUE;
|
||||
} else {
|
||||
bestScore = Float.MAX_VALUE;
|
||||
}
|
||||
bestIteration = 0;
|
||||
metrics = metrics == null ? new float[evalNames.length][round] : metrics;
|
||||
|
||||
//collect all data matrixs
|
||||
@@ -196,12 +204,27 @@ public class XGBoost {
|
||||
for (int i = 0; i < metricsOut.length; i++) {
|
||||
metrics[i][iter] = metricsOut[i];
|
||||
}
|
||||
|
||||
// If there is more than one evaluation datasets, the last one would be used
|
||||
// to determinate early stop.
|
||||
float score = metricsOut[metricsOut.length - 1];
|
||||
if (isMaximizeEvaluation(params)) {
|
||||
// Update best score if the current score is better (no update when equal)
|
||||
if (score > bestScore) {
|
||||
bestScore = score;
|
||||
bestIteration = iter;
|
||||
}
|
||||
} else {
|
||||
if (score < bestScore) {
|
||||
bestScore = score;
|
||||
bestIteration = iter;
|
||||
}
|
||||
}
|
||||
if (earlyStoppingRounds > 0) {
|
||||
boolean onTrack = judgeIfTrainingOnTrack(params, earlyStoppingRounds, metrics, iter);
|
||||
if (!onTrack) {
|
||||
String reversedDirection = getReversedDirection(params);
|
||||
if (shouldEarlyStop(earlyStoppingRounds, iter, bestIteration)) {
|
||||
Rabit.trackerPrint(String.format(
|
||||
"early stopping after %d %s rounds", earlyStoppingRounds, reversedDirection));
|
||||
"early stopping after %d rounds away from the best iteration",
|
||||
earlyStoppingRounds));
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -214,30 +237,11 @@ public class XGBoost {
|
||||
return booster;
|
||||
}
|
||||
|
||||
static boolean judgeIfTrainingOnTrack(
|
||||
Map<String, Object> params, int earlyStoppingRounds, float[][] metrics, int iter) {
|
||||
boolean maximizeEvaluationMetrics = getMetricsExpectedDirection(params);
|
||||
boolean onTrack = false;
|
||||
float[] criterion = metrics[metrics.length - 1];
|
||||
for (int shift = 0; shift < Math.min(iter, earlyStoppingRounds) - 1; shift++) {
|
||||
onTrack |= maximizeEvaluationMetrics ?
|
||||
criterion[iter - shift] >= criterion[iter - shift - 1] :
|
||||
criterion[iter - shift] <= criterion[iter - shift - 1];
|
||||
}
|
||||
return onTrack;
|
||||
static boolean shouldEarlyStop(int earlyStoppingRounds, int iter, int bestIteration) {
|
||||
return iter - bestIteration >= earlyStoppingRounds;
|
||||
}
|
||||
|
||||
private static String getReversedDirection(Map<String, Object> params) {
|
||||
String reversedDirection = null;
|
||||
if (Boolean.valueOf(String.valueOf(params.get("maximize_evaluation_metrics")))) {
|
||||
reversedDirection = "descending";
|
||||
} else if (!Boolean.valueOf(String.valueOf(params.get("maximize_evaluation_metrics")))) {
|
||||
reversedDirection = "ascending";
|
||||
}
|
||||
return reversedDirection;
|
||||
}
|
||||
|
||||
private static boolean getMetricsExpectedDirection(Map<String, Object> params) {
|
||||
private static boolean isMaximizeEvaluation(Map<String, Object> params) {
|
||||
try {
|
||||
String maximize = String.valueOf(params.get("maximize_evaluation_metrics"));
|
||||
assert(maximize != null);
|
||||
|
||||
@@ -204,7 +204,7 @@ class Booster private[xgboost4j](private[xgboost4j] var booster: JBooster)
|
||||
|
||||
|
||||
/**
|
||||
* Get importance of each feature
|
||||
* Get importance of each feature based on weight only (number of splits)
|
||||
*
|
||||
* @return featureScoreMap key: feature index, value: feature importance score
|
||||
*/
|
||||
@@ -214,7 +214,8 @@ class Booster private[xgboost4j](private[xgboost4j] var booster: JBooster)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get importance of each feature with specified feature names.
|
||||
* Get importance of each feature based on weight only
|
||||
* (number of splits), with specified feature names.
|
||||
*
|
||||
* @return featureScoreMap key: feature name, value: feature importance score
|
||||
*/
|
||||
@@ -223,6 +224,31 @@ class Booster private[xgboost4j](private[xgboost4j] var booster: JBooster)
|
||||
booster.getFeatureScore(featureNames).asScala
|
||||
}
|
||||
|
||||
/**
|
||||
* Get importance of each feature based on information gain or cover
|
||||
* Supported: ["gain, "cover", "total_gain", "total_cover"]
|
||||
*
|
||||
* @return featureScoreMap key: feature index, value: feature importance score
|
||||
*/
|
||||
@throws(classOf[XGBoostError])
|
||||
def getScore(featureMap: String, importanceType: String): Map[String, Double] = {
|
||||
Map(booster.getScore(featureMap, importanceType)
|
||||
.asScala.mapValues(_.doubleValue).toSeq: _*)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get importance of each feature based on information gain or cover
|
||||
* , with specified feature names.
|
||||
* Supported: ["gain, "cover", "total_gain", "total_cover"]
|
||||
*
|
||||
* @return featureScoreMap key: feature name, value: feature importance score
|
||||
*/
|
||||
@throws(classOf[XGBoostError])
|
||||
def getScore(featureNames: Array[String], importanceType: String): Map[String, Double] = {
|
||||
Map(booster.getScore(featureNames, importanceType)
|
||||
.asScala.mapValues(_.doubleValue).toSeq: _*)
|
||||
}
|
||||
|
||||
def getVersion: Int = booster.getVersion
|
||||
|
||||
def toByteArray: Array[Byte] = {
|
||||
|
||||
@@ -27,7 +27,7 @@ trait ObjectiveTrait extends IObjective {
|
||||
*
|
||||
* @param predicts untransformed margin predicts
|
||||
* @param dtrain training data
|
||||
* @return List with two float array, correspond to first order grad and second order grad
|
||||
* @return List with two float array, correspond to grad and hess
|
||||
*/
|
||||
def getGradient(predicts: Array[Array[Float]], dtrain: DMatrix): List[Array[Float]]
|
||||
|
||||
|
||||
@@ -139,7 +139,7 @@ public class BoosterImplTest {
|
||||
}
|
||||
|
||||
private static class IncreasingEval implements IEvaluation {
|
||||
private int value = 0;
|
||||
private int value = 1;
|
||||
|
||||
@Override
|
||||
public String getMetric() {
|
||||
@@ -153,70 +153,166 @@ public class BoosterImplTest {
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDescendMetrics() {
|
||||
Map<String, Object> paramMap = new HashMap<String, Object>() {
|
||||
{
|
||||
put("max_depth", 3);
|
||||
put("silent", 1);
|
||||
put("objective", "binary:logistic");
|
||||
put("maximize_evaluation_metrics", "false");
|
||||
}
|
||||
};
|
||||
float[][] metrics = new float[1][5];
|
||||
for (int i = 0; i < 5; i++) {
|
||||
public void testDescendMetricsWithBoundaryCondition() {
|
||||
// maximize_evaluation_metrics = false
|
||||
int totalIterations = 11;
|
||||
int earlyStoppingRound = 10;
|
||||
float[][] metrics = new float[1][totalIterations];
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
metrics[0][i] = i;
|
||||
}
|
||||
boolean onTrack = XGBoost.judgeIfTrainingOnTrack(paramMap, 5, metrics, 4);
|
||||
TestCase.assertFalse(onTrack);
|
||||
for (int i = 0; i < 5; i++) {
|
||||
metrics[0][i] = 5 - i;
|
||||
int bestIteration = 0;
|
||||
|
||||
for (int itr = 0; itr < totalIterations; itr++) {
|
||||
boolean es = XGBoost.shouldEarlyStop(earlyStoppingRound, itr, bestIteration);
|
||||
if (itr == totalIterations - 1) {
|
||||
TestCase.assertTrue(es);
|
||||
} else {
|
||||
TestCase.assertFalse(es);
|
||||
}
|
||||
}
|
||||
onTrack = XGBoost.judgeIfTrainingOnTrack(paramMap, 5, metrics, 4);
|
||||
TestCase.assertTrue(onTrack);
|
||||
for (int i = 0; i < 5; i++) {
|
||||
metrics[0][i] = 5 - i;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEarlyStoppingForMultipleMetrics() {
|
||||
// maximize_evaluation_metrics = true
|
||||
int earlyStoppingRound = 3;
|
||||
int totalIterations = 5;
|
||||
int numOfMetrics = 3;
|
||||
float[][] metrics = new float[numOfMetrics][totalIterations];
|
||||
// Only assign metric values to the first dataset, zeros for other datasets
|
||||
for (int i = 0; i < numOfMetrics; i++) {
|
||||
for (int j = 0; j < totalIterations; j++) {
|
||||
metrics[0][j] = j;
|
||||
}
|
||||
}
|
||||
int bestIteration;
|
||||
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
bestIteration = i;
|
||||
boolean es = XGBoost.shouldEarlyStop(earlyStoppingRound, i, bestIteration);
|
||||
TestCase.assertFalse(es);
|
||||
}
|
||||
|
||||
// when we have multiple datasets, only the last one was used to determinate early stop
|
||||
// Here we changed the metric of the first dataset, it doesn't have any effect to the final result
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
metrics[0][i] = totalIterations - i;
|
||||
}
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
bestIteration = i;
|
||||
boolean es = XGBoost.shouldEarlyStop(earlyStoppingRound, i, bestIteration);
|
||||
TestCase.assertFalse(es);
|
||||
}
|
||||
|
||||
// Now assign metric values to the last dataset.
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
metrics[2][i] = totalIterations - i;
|
||||
}
|
||||
bestIteration = 0;
|
||||
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
// if any metrics off, we need to stop
|
||||
boolean es = XGBoost.shouldEarlyStop(earlyStoppingRound, i, bestIteration);
|
||||
if (i >= earlyStoppingRound) {
|
||||
TestCase.assertTrue(es);
|
||||
} else {
|
||||
TestCase.assertFalse(es);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDescendMetrics() {
|
||||
// maximize_evaluation_metrics = false
|
||||
int totalIterations = 10;
|
||||
int earlyStoppingRounds = 5;
|
||||
float[][] metrics = new float[1][totalIterations];
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
metrics[0][i] = i;
|
||||
}
|
||||
int bestIteration = 0;
|
||||
|
||||
boolean es = XGBoost.shouldEarlyStop(earlyStoppingRounds, totalIterations - 1, bestIteration);
|
||||
TestCase.assertTrue(es);
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
metrics[0][i] = totalIterations - i;
|
||||
}
|
||||
bestIteration = totalIterations - 1;
|
||||
|
||||
es = XGBoost.shouldEarlyStop(earlyStoppingRounds, totalIterations - 1, bestIteration);
|
||||
TestCase.assertFalse(es);
|
||||
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
metrics[0][i] = totalIterations - i;
|
||||
}
|
||||
metrics[0][4] = 1;
|
||||
metrics[0][9] = 5;
|
||||
|
||||
bestIteration = 4;
|
||||
|
||||
es = XGBoost.shouldEarlyStop(earlyStoppingRounds, totalIterations - 1, bestIteration);
|
||||
TestCase.assertTrue(es);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAscendMetricsWithBoundaryCondition() {
|
||||
// maximize_evaluation_metrics = true
|
||||
int totalIterations = 11;
|
||||
int earlyStoppingRounds = 10;
|
||||
float[][] metrics = new float[1][totalIterations];
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
metrics[0][i] = totalIterations - i;
|
||||
}
|
||||
int bestIteration = 0;
|
||||
|
||||
for (int itr = 0; itr < totalIterations; itr++) {
|
||||
boolean es = XGBoost.shouldEarlyStop(earlyStoppingRounds, itr, bestIteration);
|
||||
if (itr == totalIterations - 1) {
|
||||
TestCase.assertTrue(es);
|
||||
} else {
|
||||
TestCase.assertFalse(es);
|
||||
}
|
||||
}
|
||||
metrics[0][0] = 1;
|
||||
metrics[0][2] = 5;
|
||||
onTrack = XGBoost.judgeIfTrainingOnTrack(paramMap, 5, metrics, 4);
|
||||
TestCase.assertTrue(onTrack);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAscendMetrics() {
|
||||
Map<String, Object> paramMap = new HashMap<String, Object>() {
|
||||
{
|
||||
put("max_depth", 3);
|
||||
put("silent", 1);
|
||||
put("objective", "binary:logistic");
|
||||
put("maximize_evaluation_metrics", "true");
|
||||
}
|
||||
};
|
||||
float[][] metrics = new float[1][5];
|
||||
for (int i = 0; i < 5; i++) {
|
||||
// maximize_evaluation_metrics = true
|
||||
int totalIterations = 10;
|
||||
int earlyStoppingRounds = 5;
|
||||
float[][] metrics = new float[1][totalIterations];
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
metrics[0][i] = totalIterations - i;
|
||||
}
|
||||
int bestIteration = 0;
|
||||
|
||||
boolean es = XGBoost.shouldEarlyStop(earlyStoppingRounds, totalIterations - 1, bestIteration);
|
||||
TestCase.assertTrue(es);
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
metrics[0][i] = i;
|
||||
}
|
||||
boolean onTrack = XGBoost.judgeIfTrainingOnTrack(paramMap, 5, metrics, 4);
|
||||
TestCase.assertTrue(onTrack);
|
||||
for (int i = 0; i < 5; i++) {
|
||||
metrics[0][i] = 5 - i;
|
||||
}
|
||||
onTrack = XGBoost.judgeIfTrainingOnTrack(paramMap, 5, metrics, 4);
|
||||
TestCase.assertFalse(onTrack);
|
||||
for (int i = 0; i < 5; i++) {
|
||||
bestIteration = totalIterations - 1;
|
||||
|
||||
es = XGBoost.shouldEarlyStop(earlyStoppingRounds, totalIterations - 1, bestIteration);
|
||||
TestCase.assertFalse(es);
|
||||
|
||||
for (int i = 0; i < totalIterations; i++) {
|
||||
metrics[0][i] = i;
|
||||
}
|
||||
metrics[0][0] = 6;
|
||||
metrics[0][2] = 1;
|
||||
onTrack = XGBoost.judgeIfTrainingOnTrack(paramMap, 5, metrics, 4);
|
||||
TestCase.assertTrue(onTrack);
|
||||
metrics[0][4] = 9;
|
||||
metrics[0][9] = 4;
|
||||
|
||||
bestIteration = 4;
|
||||
|
||||
es = XGBoost.shouldEarlyStop(earlyStoppingRounds, totalIterations - 1, bestIteration);
|
||||
TestCase.assertTrue(es);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBoosterEarlyStop() throws XGBoostError, IOException {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
DMatrix testMat = new DMatrix("../../demo/data/agaricus.txt.test");
|
||||
// testBoosterWithFastHistogram(trainMat, testMat);
|
||||
Map<String, Object> paramMap = new HashMap<String, Object>() {
|
||||
{
|
||||
put("max_depth", 3);
|
||||
@@ -236,6 +332,12 @@ public class BoosterImplTest {
|
||||
earlyStoppingRound);
|
||||
|
||||
// Make sure we've stopped early.
|
||||
for (int w = 0; w < watches.size(); w++) {
|
||||
for (int r = 0; r <= earlyStoppingRound; r++) {
|
||||
TestCase.assertFalse(0.0f == metrics[w][r]);
|
||||
}
|
||||
}
|
||||
|
||||
for (int w = 0; w < watches.size(); w++) {
|
||||
for (int r = earlyStoppingRound + 1; r < round; r++) {
|
||||
TestCase.assertEquals(0.0f, metrics[w][r]);
|
||||
@@ -243,27 +345,27 @@ public class BoosterImplTest {
|
||||
}
|
||||
}
|
||||
|
||||
private void testWithFastHisto(DMatrix trainingSet, Map<String, DMatrix> watches, int round,
|
||||
private void testWithQuantileHisto(DMatrix trainingSet, Map<String, DMatrix> watches, int round,
|
||||
Map<String, Object> paramMap, float threshold) throws XGBoostError {
|
||||
float[][] metrics = new float[watches.size()][round];
|
||||
Booster booster = XGBoost.train(trainingSet, paramMap, round, watches,
|
||||
metrics, null, null, 0);
|
||||
for (int i = 0; i < metrics.length; i++)
|
||||
for (int j = 1; j < metrics[i].length; j++) {
|
||||
TestCase.assertTrue(metrics[i][j] >= metrics[i][j - 1]);
|
||||
TestCase.assertTrue(metrics[i][j] >= metrics[i][j - 1] ||
|
||||
Math.abs(metrics[i][j] - metrics[i][j - 1]) < 0.1);
|
||||
}
|
||||
for (int i = 0; i < metrics.length; i++)
|
||||
for (int j = 0; j < metrics[i].length; j++) {
|
||||
TestCase.assertTrue(metrics[i][j] >= threshold);
|
||||
TestCase.assertTrue(metrics[i][j] >= threshold);
|
||||
}
|
||||
booster.dispose();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFastHistoDepthWise() throws XGBoostError {
|
||||
public void testQuantileHistoDepthWise() throws XGBoostError {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
DMatrix testMat = new DMatrix("../../demo/data/agaricus.txt.test");
|
||||
// testBoosterWithFastHistogram(trainMat, testMat);
|
||||
Map<String, Object> paramMap = new HashMap<String, Object>() {
|
||||
{
|
||||
put("max_depth", 3);
|
||||
@@ -277,14 +379,13 @@ public class BoosterImplTest {
|
||||
Map<String, DMatrix> watches = new HashMap<>();
|
||||
watches.put("training", trainMat);
|
||||
watches.put("test", testMat);
|
||||
testWithFastHisto(trainMat, watches, 10, paramMap, 0.0f);
|
||||
testWithQuantileHisto(trainMat, watches, 10, paramMap, 0.95f);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFastHistoLossGuide() throws XGBoostError {
|
||||
public void testQuantileHistoLossGuide() throws XGBoostError {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
DMatrix testMat = new DMatrix("../../demo/data/agaricus.txt.test");
|
||||
// testBoosterWithFastHistogram(trainMat, testMat);
|
||||
Map<String, Object> paramMap = new HashMap<String, Object>() {
|
||||
{
|
||||
put("max_depth", 0);
|
||||
@@ -299,14 +400,13 @@ public class BoosterImplTest {
|
||||
Map<String, DMatrix> watches = new HashMap<>();
|
||||
watches.put("training", trainMat);
|
||||
watches.put("test", testMat);
|
||||
testWithFastHisto(trainMat, watches, 10, paramMap, 0.0f);
|
||||
testWithQuantileHisto(trainMat, watches, 10, paramMap, 0.95f);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFastHistoLossGuideMaxBin() throws XGBoostError {
|
||||
public void testQuantileHistoLossGuideMaxBin() throws XGBoostError {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
DMatrix testMat = new DMatrix("../../demo/data/agaricus.txt.test");
|
||||
// testBoosterWithFastHistogram(trainMat, testMat);
|
||||
Map<String, Object> paramMap = new HashMap<String, Object>() {
|
||||
{
|
||||
put("max_depth", 0);
|
||||
@@ -321,7 +421,7 @@ public class BoosterImplTest {
|
||||
};
|
||||
Map<String, DMatrix> watches = new HashMap<>();
|
||||
watches.put("training", trainMat);
|
||||
testWithFastHisto(trainMat, watches, 10, paramMap, 0.0f);
|
||||
testWithQuantileHisto(trainMat, watches, 10, paramMap, 0.95f);
|
||||
}
|
||||
|
||||
@Test
|
||||
@@ -341,7 +441,7 @@ public class BoosterImplTest {
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetFeatureImportance() throws XGBoostError {
|
||||
public void testGetFeatureScore() throws XGBoostError {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
DMatrix testMat = new DMatrix("../../demo/data/agaricus.txt.test");
|
||||
|
||||
@@ -353,38 +453,81 @@ public class BoosterImplTest {
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFastHistoDepthwiseMaxDepth() throws XGBoostError {
|
||||
public void testGetFeatureImportanceGain() throws XGBoostError {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
DMatrix testMat = new DMatrix("../../demo/data/agaricus.txt.test");
|
||||
// testBoosterWithFastHistogram(trainMat, testMat);
|
||||
|
||||
Booster booster = trainBooster(trainMat, testMat);
|
||||
String[] featureNames = new String[126];
|
||||
for(int i = 0; i < 126; i++) featureNames[i] = "test_feature_name_" + i;
|
||||
Map<String, Double> scoreMap = booster.getScore(featureNames, "gain");
|
||||
for (String fName: scoreMap.keySet()) TestCase.assertTrue(fName.startsWith("test_feature_name_"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetFeatureImportanceTotalGain() throws XGBoostError {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
DMatrix testMat = new DMatrix("../../demo/data/agaricus.txt.test");
|
||||
|
||||
Booster booster = trainBooster(trainMat, testMat);
|
||||
String[] featureNames = new String[126];
|
||||
for(int i = 0; i < 126; i++) featureNames[i] = "test_feature_name_" + i;
|
||||
Map<String, Double> scoreMap = booster.getScore(featureNames, "total_gain");
|
||||
for (String fName: scoreMap.keySet()) TestCase.assertTrue(fName.startsWith("test_feature_name_"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetFeatureImportanceCover() throws XGBoostError {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
DMatrix testMat = new DMatrix("../../demo/data/agaricus.txt.test");
|
||||
|
||||
Booster booster = trainBooster(trainMat, testMat);
|
||||
String[] featureNames = new String[126];
|
||||
for(int i = 0; i < 126; i++) featureNames[i] = "test_feature_name_" + i;
|
||||
Map<String, Double> scoreMap = booster.getScore(featureNames, "cover");
|
||||
for (String fName: scoreMap.keySet()) TestCase.assertTrue(fName.startsWith("test_feature_name_"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetFeatureImportanceTotalCover() throws XGBoostError {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
DMatrix testMat = new DMatrix("../../demo/data/agaricus.txt.test");
|
||||
|
||||
Booster booster = trainBooster(trainMat, testMat);
|
||||
String[] featureNames = new String[126];
|
||||
for(int i = 0; i < 126; i++) featureNames[i] = "test_feature_name_" + i;
|
||||
Map<String, Double> scoreMap = booster.getScore(featureNames, "total_cover");
|
||||
for (String fName: scoreMap.keySet()) TestCase.assertTrue(fName.startsWith("test_feature_name_"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testQuantileHistoDepthwiseMaxDepth() throws XGBoostError {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
Map<String, Object> paramMap = new HashMap<String, Object>() {
|
||||
{
|
||||
put("max_depth", 3);
|
||||
put("silent", 1);
|
||||
put("objective", "binary:logistic");
|
||||
put("tree_method", "hist");
|
||||
put("max_depth", 2);
|
||||
put("grow_policy", "depthwise");
|
||||
put("eval_metric", "auc");
|
||||
}
|
||||
};
|
||||
Map<String, DMatrix> watches = new HashMap<>();
|
||||
watches.put("training", trainMat);
|
||||
testWithFastHisto(trainMat, watches, 10, paramMap, 0.85f);
|
||||
testWithQuantileHisto(trainMat, watches, 10, paramMap, 0.95f);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFastHistoDepthwiseMaxDepthMaxBin() throws XGBoostError {
|
||||
public void testQuantileHistoDepthwiseMaxDepthMaxBin() throws XGBoostError {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
DMatrix testMat = new DMatrix("../../demo/data/agaricus.txt.test");
|
||||
// testBoosterWithFastHistogram(trainMat, testMat);
|
||||
Map<String, Object> paramMap = new HashMap<String, Object>() {
|
||||
{
|
||||
put("max_depth", 3);
|
||||
put("silent", 1);
|
||||
put("objective", "binary:logistic");
|
||||
put("tree_method", "hist");
|
||||
put("max_depth", 2);
|
||||
put("max_bin", 2);
|
||||
put("grow_policy", "depthwise");
|
||||
put("eval_metric", "auc");
|
||||
@@ -392,7 +535,7 @@ public class BoosterImplTest {
|
||||
};
|
||||
Map<String, DMatrix> watches = new HashMap<>();
|
||||
watches.put("training", trainMat);
|
||||
testWithFastHisto(trainMat, watches, 10, paramMap, 0.85f);
|
||||
testWithQuantileHisto(trainMat, watches, 10, paramMap, 0.95f);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -77,7 +77,7 @@ class ScalaBoosterImplSuite extends FunSuite {
|
||||
XGBoost.train(trainMat, paramMap, round, watches)
|
||||
}
|
||||
|
||||
private def trainBoosterWithFastHisto(
|
||||
private def trainBoosterWithQuantileHisto(
|
||||
trainMat: DMatrix,
|
||||
watches: Map[String, DMatrix],
|
||||
round: Int,
|
||||
@@ -146,57 +146,57 @@ class ScalaBoosterImplSuite extends FunSuite {
|
||||
XGBoost.crossValidation(trainMat, params, round, nfold)
|
||||
}
|
||||
|
||||
test("test with fast histo depthwise") {
|
||||
test("test with quantile histo depthwise") {
|
||||
val trainMat = new DMatrix("../../demo/data/agaricus.txt.train")
|
||||
val testMat = new DMatrix("../../demo/data/agaricus.txt.test")
|
||||
val paramMap = List("max_depth" -> "3", "silent" -> "0",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist",
|
||||
"grow_policy" -> "depthwise", "eval_metric" -> "auc").toMap
|
||||
trainBoosterWithFastHisto(trainMat, Map("training" -> trainMat, "test" -> testMat),
|
||||
round = 10, paramMap, 0.0f)
|
||||
trainBoosterWithQuantileHisto(trainMat, Map("training" -> trainMat, "test" -> testMat),
|
||||
round = 10, paramMap, 0.95f)
|
||||
}
|
||||
|
||||
test("test with fast histo lossguide") {
|
||||
test("test with quantile histo lossguide") {
|
||||
val trainMat = new DMatrix("../../demo/data/agaricus.txt.train")
|
||||
val testMat = new DMatrix("../../demo/data/agaricus.txt.test")
|
||||
val paramMap = List("max_depth" -> "0", "silent" -> "0",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist",
|
||||
"grow_policy" -> "lossguide", "max_leaves" -> "8", "eval_metric" -> "auc").toMap
|
||||
trainBoosterWithFastHisto(trainMat, Map("training" -> trainMat, "test" -> testMat),
|
||||
round = 10, paramMap, 0.0f)
|
||||
trainBoosterWithQuantileHisto(trainMat, Map("training" -> trainMat, "test" -> testMat),
|
||||
round = 10, paramMap, 0.95f)
|
||||
}
|
||||
|
||||
test("test with fast histo lossguide with max bin") {
|
||||
test("test with quantile histo lossguide with max bin") {
|
||||
val trainMat = new DMatrix("../../demo/data/agaricus.txt.train")
|
||||
val testMat = new DMatrix("../../demo/data/agaricus.txt.test")
|
||||
val paramMap = List("max_depth" -> "0", "silent" -> "0",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist",
|
||||
"grow_policy" -> "lossguide", "max_leaves" -> "8", "max_bin" -> "16",
|
||||
"eval_metric" -> "auc").toMap
|
||||
trainBoosterWithFastHisto(trainMat, Map("training" -> trainMat),
|
||||
round = 10, paramMap, 0.0f)
|
||||
trainBoosterWithQuantileHisto(trainMat, Map("training" -> trainMat),
|
||||
round = 10, paramMap, 0.95f)
|
||||
}
|
||||
|
||||
test("test with fast histo depthwidth with max depth") {
|
||||
test("test with quantile histo depthwidth with max depth") {
|
||||
val trainMat = new DMatrix("../../demo/data/agaricus.txt.train")
|
||||
val testMat = new DMatrix("../../demo/data/agaricus.txt.test")
|
||||
val paramMap = List("max_depth" -> "0", "silent" -> "0",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist",
|
||||
"grow_policy" -> "depthwise", "max_leaves" -> "8", "max_depth" -> "2",
|
||||
"eval_metric" -> "auc").toMap
|
||||
trainBoosterWithFastHisto(trainMat, Map("training" -> trainMat),
|
||||
round = 10, paramMap, 0.85f)
|
||||
trainBoosterWithQuantileHisto(trainMat, Map("training" -> trainMat),
|
||||
round = 10, paramMap, 0.95f)
|
||||
}
|
||||
|
||||
test("test with fast histo depthwidth with max depth and max bin") {
|
||||
test("test with quantile histo depthwidth with max depth and max bin") {
|
||||
val trainMat = new DMatrix("../../demo/data/agaricus.txt.train")
|
||||
val testMat = new DMatrix("../../demo/data/agaricus.txt.test")
|
||||
val paramMap = List("max_depth" -> "0", "silent" -> "0",
|
||||
"objective" -> "binary:logistic", "tree_method" -> "hist",
|
||||
"grow_policy" -> "depthwise", "max_depth" -> "2", "max_bin" -> "2",
|
||||
"eval_metric" -> "auc").toMap
|
||||
trainBoosterWithFastHisto(trainMat, Map("training" -> trainMat),
|
||||
round = 10, paramMap, 0.85f)
|
||||
trainBoosterWithQuantileHisto(trainMat, Map("training" -> trainMat),
|
||||
round = 10, paramMap, 0.95f)
|
||||
}
|
||||
|
||||
test("test training from existing model in scala") {
|
||||
|
||||
@@ -2,8 +2,25 @@
|
||||
|
||||
ignore=tests
|
||||
|
||||
extension-pkg-whitelist=numpy
|
||||
|
||||
disiable=unexpected-special-method-signature,too-many-nested-blocks
|
||||
|
||||
dummy-variables-rgx=(unused|)_.*
|
||||
|
||||
reports=no
|
||||
|
||||
[BASIC]
|
||||
|
||||
# Enforce naming convention
|
||||
const-naming-style=UPPER_CASE
|
||||
class-naming-style=PascalCase
|
||||
function-naming-style=snake_case
|
||||
method-naming-style=snake_case
|
||||
attr-naming-style=snake_case
|
||||
argument-naming-style=snake_case
|
||||
variable-naming-style=snake_case
|
||||
class-attribute-naming-style=snake_case
|
||||
|
||||
# Allow single-letter variables
|
||||
variable-rgx=[a-zA-Z_][a-z0-9_]{0,30}$
|
||||
|
||||
@@ -28,8 +28,8 @@ Please install ``gcc@5`` from `Homebrew <https://brew.sh/>`_::
|
||||
|
||||
After installing ``gcc@5``, set it as your compiler::
|
||||
|
||||
export CC = gcc-5
|
||||
export CXX = g++-5
|
||||
export CC=gcc-5
|
||||
export CXX=g++-5
|
||||
|
||||
Linux
|
||||
-----
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# pylint: disable=invalid-name, exec-used
|
||||
"""Setup xgboost package."""
|
||||
from __future__ import absolute_import
|
||||
import io
|
||||
import sys
|
||||
import os
|
||||
from setuptools import setup, find_packages
|
||||
@@ -31,7 +32,7 @@ print("Install libxgboost from: %s" % LIB_PATH)
|
||||
setup(name='xgboost',
|
||||
version=open(os.path.join(CURRENT_DIR, 'xgboost/VERSION')).read().strip(),
|
||||
description="XGBoost Python Package",
|
||||
long_description=open(os.path.join(CURRENT_DIR, 'README.rst')).read(),
|
||||
long_description=io.open(os.path.join(CURRENT_DIR, 'README.rst'), encoding='utf-8').read(),
|
||||
install_requires=[
|
||||
'numpy',
|
||||
'scipy',
|
||||
|
||||
@@ -25,6 +25,9 @@ if echo "${OSTYPE}" | grep -q "darwin"; then
|
||||
elif which g++-7; then
|
||||
export CC=gcc-7
|
||||
export CXX=g++-7
|
||||
elif which g++-8; then
|
||||
export CC=gcc-8
|
||||
export CXX=g++-8
|
||||
elif which clang++; then
|
||||
export CC=clang
|
||||
export CXX=clang++
|
||||
|
||||
@@ -188,8 +188,8 @@ def early_stop(stopping_rounds, maximize=False, verbose=True):
|
||||
msg = ("Multiple eval metrics have been passed: "
|
||||
"'{0}' will be used for early stopping.\n\n")
|
||||
rabit.tracker_print(msg.format(env.evaluation_result_list[-1][0]))
|
||||
maximize_metrics = ('auc', 'map', 'ndcg')
|
||||
maximize_at_n_metrics = ('auc@', 'map@', 'ndcg@')
|
||||
maximize_metrics = ('auc', 'aucpr', 'map', 'ndcg')
|
||||
maximize_at_n_metrics = ('auc@', 'aucpr@' 'map@', 'ndcg@')
|
||||
maximize_score = maximize
|
||||
metric_label = env.evaluation_result_list[-1][0]
|
||||
metric = metric_label.split('-', 1)[-1]
|
||||
|
||||
@@ -49,7 +49,11 @@ except ImportError:
|
||||
|
||||
# dt
|
||||
try:
|
||||
from datatable import DataTable
|
||||
import datatable
|
||||
if hasattr(datatable, "Frame"):
|
||||
DataTable = datatable.Frame
|
||||
else:
|
||||
DataTable = datatable.DataTable
|
||||
DT_INSTALLED = True
|
||||
except ImportError:
|
||||
|
||||
|
||||
@@ -3,19 +3,27 @@
|
||||
# pylint: disable=too-many-branches, too-many-lines, W0141
|
||||
"""Core XGBoost Library."""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import collections
|
||||
# pylint: disable=no-name-in-module,import-error
|
||||
try:
|
||||
from collections.abc import Mapping # Python 3
|
||||
except ImportError:
|
||||
from collections import Mapping # Python 2
|
||||
# pylint: enable=no-name-in-module,import-error
|
||||
import ctypes
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
import numpy as np
|
||||
import scipy.sparse
|
||||
|
||||
from .compat import STRING_TYPES, PY3, DataFrame, MultiIndex, py_str, PANDAS_INSTALLED, DataTable
|
||||
from .compat import (STRING_TYPES, PY3, DataFrame, MultiIndex, py_str,
|
||||
PANDAS_INSTALLED, DataTable)
|
||||
from .libpath import find_lib_path
|
||||
|
||||
|
||||
# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h
|
||||
c_bst_ulong = ctypes.c_uint64
|
||||
|
||||
@@ -117,18 +125,23 @@ def _load_lib():
|
||||
lib_paths = find_lib_path()
|
||||
if len(lib_paths) == 0:
|
||||
return None
|
||||
pathBackup = os.environ['PATH']
|
||||
try:
|
||||
pathBackup = os.environ['PATH'].split(os.pathsep)
|
||||
except KeyError:
|
||||
pathBackup = []
|
||||
lib_success = False
|
||||
os_error_list = []
|
||||
for lib_path in lib_paths:
|
||||
try:
|
||||
# needed when the lib is linked with non-system-available dependencies
|
||||
os.environ['PATH'] = pathBackup + os.pathsep + os.path.dirname(lib_path)
|
||||
os.environ['PATH'] = os.pathsep.join(pathBackup + [os.path.dirname(lib_path)])
|
||||
lib = ctypes.cdll.LoadLibrary(lib_path)
|
||||
lib_success = True
|
||||
except OSError as e:
|
||||
os_error_list.append(str(e))
|
||||
continue
|
||||
finally:
|
||||
os.environ['PATH'] = os.pathsep.join(pathBackup)
|
||||
if not lib_success:
|
||||
libname = os.path.basename(lib_paths[0])
|
||||
raise XGBoostError(
|
||||
@@ -274,10 +287,10 @@ def _maybe_dt_data(data, feature_names, feature_types):
|
||||
return data, feature_names, feature_types
|
||||
|
||||
data_types_names = tuple(lt.name for lt in data.ltypes)
|
||||
if not all(type_name in DT_TYPE_MAPPER for type_name in data_types_names):
|
||||
bad_fields = [data.names[i] for i, type_name in
|
||||
enumerate(data_types_names) if type_name not in DT_TYPE_MAPPER]
|
||||
|
||||
bad_fields = [data.names[i]
|
||||
for i, type_name in enumerate(data_types_names)
|
||||
if type_name not in DT_TYPE_MAPPER]
|
||||
if bad_fields:
|
||||
msg = """DataFrame.types for data must be int, float or bool.
|
||||
Did not expect the data types in fields """
|
||||
raise ValueError(msg + ', '.join(bad_fields))
|
||||
@@ -304,7 +317,7 @@ def _maybe_dt_array(array):
|
||||
|
||||
# below requires new dt version
|
||||
# extract first column
|
||||
array = array.tonumpy()[:, 0].astype('float')
|
||||
array = array.to_numpy()[:, 0].astype('float')
|
||||
|
||||
return array
|
||||
|
||||
@@ -327,7 +340,7 @@ class DMatrix(object):
|
||||
"""
|
||||
Parameters
|
||||
----------
|
||||
data : string/numpy array/scipy.sparse/pd.DataFrame/DataTable
|
||||
data : string/numpy.array/scipy.sparse/pd.DataFrame/dt.Frame
|
||||
Data source of DMatrix.
|
||||
When data is string type, it represents the path libsvm format txt file,
|
||||
or binary file that xgboost can read from.
|
||||
@@ -338,6 +351,14 @@ class DMatrix(object):
|
||||
None, defaults to np.nan.
|
||||
weight : list or numpy 1-D array , optional
|
||||
Weight for each instance.
|
||||
|
||||
.. note:: For ranking task, weights are per-group.
|
||||
|
||||
In ranking task, one weight is assigned to each group (not each data
|
||||
point). This is because we only care about the relative ordering of
|
||||
data points within each group, so it doesn't make sense to assign
|
||||
weights to individual data points.
|
||||
|
||||
silent : boolean, optional
|
||||
Whether print messages during construction
|
||||
feature_names : list, optional
|
||||
@@ -369,6 +390,10 @@ class DMatrix(object):
|
||||
label = _maybe_dt_array(label)
|
||||
weight = _maybe_dt_array(weight)
|
||||
|
||||
if isinstance(data, list):
|
||||
warnings.warn('Initializing DMatrix from List is deprecated.',
|
||||
DeprecationWarning)
|
||||
|
||||
if isinstance(data, STRING_TYPES):
|
||||
self.handle = ctypes.c_void_p()
|
||||
_check_call(_LIB.XGDMatrixCreateFromFile(c_str(data),
|
||||
@@ -472,16 +497,20 @@ class DMatrix(object):
|
||||
|
||||
def _init_from_dt(self, data, nthread):
|
||||
"""
|
||||
Initialize data from a DataTable
|
||||
Initialize data from a datatable Frame.
|
||||
"""
|
||||
cols = []
|
||||
ptrs = (ctypes.c_void_p * data.ncols)()
|
||||
for icol in range(data.ncols):
|
||||
col = data.internal.column(icol)
|
||||
cols.append(col)
|
||||
# int64_t (void*)
|
||||
ptr = col.data_pointer
|
||||
ptrs[icol] = ctypes.c_void_p(ptr)
|
||||
if hasattr(data, "internal") and hasattr(data.internal, "column"):
|
||||
# datatable>0.8.0
|
||||
for icol in range(data.ncols):
|
||||
col = data.internal.column(icol)
|
||||
ptr = col.data_pointer
|
||||
ptrs[icol] = ctypes.c_void_p(ptr)
|
||||
else:
|
||||
# datatable<=0.8.0
|
||||
from datatable.internal import frame_column_data_r
|
||||
for icol in range(data.ncols):
|
||||
ptrs[icol] = frame_column_data_r(data, icol)
|
||||
|
||||
# always return stypes for dt ingestion
|
||||
feature_type_strings = (ctypes.c_char_p * data.ncols)()
|
||||
@@ -555,6 +584,11 @@ class DMatrix(object):
|
||||
data: numpy array
|
||||
The array of data to be set
|
||||
"""
|
||||
if getattr(data, 'base', None) is not None and \
|
||||
data.base is not None and isinstance(data, np.ndarray) \
|
||||
and isinstance(data.base, np.ndarray) and (not data.flags.c_contiguous):
|
||||
self.set_float_info_npy2d(field, data)
|
||||
return
|
||||
c_data = c_array(ctypes.c_float, data)
|
||||
_check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,
|
||||
c_str(field),
|
||||
@@ -573,7 +607,14 @@ class DMatrix(object):
|
||||
data: numpy array
|
||||
The array of data to be set
|
||||
"""
|
||||
data = np.array(data, copy=False, dtype=np.float32)
|
||||
if getattr(data, 'base', None) is not None and \
|
||||
data.base is not None and isinstance(data, np.ndarray) \
|
||||
and isinstance(data.base, np.ndarray) and (not data.flags.c_contiguous):
|
||||
warnings.warn("Use subset (sliced data) of np.ndarray is not recommended " +
|
||||
"because it will generate extra copies and increase memory consumption")
|
||||
data = np.array(data, copy=True, dtype=np.float32)
|
||||
else:
|
||||
data = np.array(data, copy=False, dtype=np.float32)
|
||||
c_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
|
||||
_check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,
|
||||
c_str(field),
|
||||
@@ -591,6 +632,14 @@ class DMatrix(object):
|
||||
data: numpy array
|
||||
The array of data to be set
|
||||
"""
|
||||
if getattr(data, 'base', None) is not None and \
|
||||
data.base is not None and isinstance(data, np.ndarray) \
|
||||
and isinstance(data.base, np.ndarray) and (not data.flags.c_contiguous):
|
||||
warnings.warn("Use subset (sliced data) of np.ndarray is not recommended " +
|
||||
"because it will generate extra copies and increase memory consumption")
|
||||
data = np.array(data, copy=True, dtype=ctypes.c_uint)
|
||||
else:
|
||||
data = np.array(data, copy=False, dtype=ctypes.c_uint)
|
||||
_check_call(_LIB.XGDMatrixSetUIntInfo(self.handle,
|
||||
c_str(field),
|
||||
c_array(ctypes.c_uint, data),
|
||||
@@ -638,6 +687,13 @@ class DMatrix(object):
|
||||
----------
|
||||
weight : array like
|
||||
Weight for each data point
|
||||
|
||||
.. note:: For ranking task, weights are per-group.
|
||||
|
||||
In ranking task, one weight is assigned to each group (not each data
|
||||
point). This is because we only care about the relative ordering of
|
||||
data points within each group, so it doesn't make sense to assign
|
||||
weights to individual data points.
|
||||
"""
|
||||
self.set_float_info('weight', weight)
|
||||
|
||||
@@ -649,6 +705,13 @@ class DMatrix(object):
|
||||
----------
|
||||
weight : array like
|
||||
Weight for each data point in numpy 2D array
|
||||
|
||||
.. note:: For ranking task, weights are per-group.
|
||||
|
||||
In ranking task, one weight is assigned to each group (not each data
|
||||
point). This is because we only care about the relative ordering of
|
||||
data points within each group, so it doesn't make sense to assign
|
||||
weights to individual data points.
|
||||
"""
|
||||
self.set_float_info_npy2d('weight', weight)
|
||||
|
||||
@@ -850,6 +913,7 @@ class DMatrix(object):
|
||||
|
||||
|
||||
class Booster(object):
|
||||
# pylint: disable=too-many-public-methods
|
||||
"""A Booster of XGBoost.
|
||||
|
||||
Booster is the model of xgboost, that contains low level routines for
|
||||
@@ -1016,7 +1080,7 @@ class Booster(object):
|
||||
value: optional
|
||||
value of the specified parameter, when params is str key
|
||||
"""
|
||||
if isinstance(params, collections.Mapping):
|
||||
if isinstance(params, Mapping):
|
||||
params = params.items()
|
||||
elif isinstance(params, STRING_TYPES) and value is not None:
|
||||
params = [(params, value)]
|
||||
@@ -1024,8 +1088,8 @@ class Booster(object):
|
||||
_check_call(_LIB.XGBoosterSetParam(self.handle, c_str(key), c_str(str(val))))
|
||||
|
||||
def update(self, dtrain, iteration, fobj=None):
|
||||
"""
|
||||
Update for one iteration, with objective function calculated internally.
|
||||
"""Update for one iteration, with objective function calculated
|
||||
internally. This function should not be called directly by users.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
@@ -1035,6 +1099,7 @@ class Booster(object):
|
||||
Current iteration number.
|
||||
fobj : function
|
||||
Customized objective function.
|
||||
|
||||
"""
|
||||
if not isinstance(dtrain, DMatrix):
|
||||
raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
|
||||
@@ -1049,8 +1114,9 @@ class Booster(object):
|
||||
self.boost(dtrain, grad, hess)
|
||||
|
||||
def boost(self, dtrain, grad, hess):
|
||||
"""
|
||||
Boost the booster for one iteration, with customized gradient statistics.
|
||||
"""Boost the booster for one iteration, with customized gradient
|
||||
statistics. Like :func:`xgboost.core.Booster.update`, this
|
||||
function should not be called directly by users.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
@@ -1060,6 +1126,7 @@ class Booster(object):
|
||||
The first order of gradient.
|
||||
hess : list
|
||||
The second order of gradient.
|
||||
|
||||
"""
|
||||
if len(grad) != len(hess):
|
||||
raise ValueError('grad / hess length mismatch: {} / {}'.format(len(grad), len(hess)))
|
||||
@@ -1431,8 +1498,7 @@ class Booster(object):
|
||||
importance_type: str, default 'weight'
|
||||
One of the importance types defined above.
|
||||
"""
|
||||
|
||||
if self.booster != 'gbtree':
|
||||
if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}:
|
||||
raise ValueError('Feature importance is not defined for Booster type {}'
|
||||
.format(self.booster))
|
||||
|
||||
@@ -1513,6 +1579,91 @@ class Booster(object):
|
||||
|
||||
return gmap
|
||||
|
||||
def trees_to_dataframe(self, fmap=''):
|
||||
"""Parse a boosted tree model text dump into a pandas DataFrame structure.
|
||||
|
||||
This feature is only defined when the decision tree model is chosen as base
|
||||
learner (`booster in {gbtree, dart}`). It is not defined for other base learner
|
||||
types, such as linear learners (`booster=gblinear`).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fmap: str (optional)
|
||||
The name of feature map file.
|
||||
"""
|
||||
# pylint: disable=too-many-locals
|
||||
if not PANDAS_INSTALLED:
|
||||
raise Exception(('pandas must be available to use this method.'
|
||||
'Install pandas before calling again.'))
|
||||
|
||||
if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}:
|
||||
raise ValueError('This method is not defined for Booster type {}'
|
||||
.format(self.booster))
|
||||
|
||||
tree_ids = []
|
||||
node_ids = []
|
||||
fids = []
|
||||
splits = []
|
||||
y_directs = []
|
||||
n_directs = []
|
||||
missings = []
|
||||
gains = []
|
||||
covers = []
|
||||
|
||||
trees = self.get_dump(fmap, with_stats=True)
|
||||
for i, tree in enumerate(trees):
|
||||
for line in tree.split('\n'):
|
||||
arr = line.split('[')
|
||||
# Leaf node
|
||||
if len(arr) == 1:
|
||||
# Last element of line.split is an empy string
|
||||
if arr == ['']:
|
||||
continue
|
||||
# parse string
|
||||
parse = arr[0].split(':')
|
||||
stats = re.split('=|,', parse[1])
|
||||
|
||||
# append to lists
|
||||
tree_ids.append(i)
|
||||
node_ids.append(int(re.findall(r'\b\d+\b', parse[0])[0]))
|
||||
fids.append('Leaf')
|
||||
splits.append(float('NAN'))
|
||||
y_directs.append(float('NAN'))
|
||||
n_directs.append(float('NAN'))
|
||||
missings.append(float('NAN'))
|
||||
gains.append(float(stats[1]))
|
||||
covers.append(float(stats[3]))
|
||||
# Not a Leaf Node
|
||||
else:
|
||||
# parse string
|
||||
fid = arr[1].split(']')
|
||||
parse = fid[0].split('<')
|
||||
stats = re.split('=|,', fid[1])
|
||||
|
||||
# append to lists
|
||||
tree_ids.append(i)
|
||||
node_ids.append(int(re.findall(r'\b\d+\b', arr[0])[0]))
|
||||
fids.append(parse[0])
|
||||
splits.append(float(parse[1]))
|
||||
str_i = str(i)
|
||||
y_directs.append(str_i + '-' + stats[1])
|
||||
n_directs.append(str_i + '-' + stats[3])
|
||||
missings.append(str_i + '-' + stats[5])
|
||||
gains.append(float(stats[7]))
|
||||
covers.append(float(stats[9]))
|
||||
|
||||
ids = [str(t_id) + '-' + str(n_id) for t_id, n_id in zip(tree_ids, node_ids)]
|
||||
df = DataFrame({'Tree': tree_ids, 'Node': node_ids, 'ID': ids,
|
||||
'Feature': fids, 'Split': splits, 'Yes': y_directs,
|
||||
'No': n_directs, 'Missing': missings, 'Gain': gains,
|
||||
'Cover': covers})
|
||||
|
||||
if callable(getattr(df, 'sort_values', None)):
|
||||
# pylint: disable=no-member
|
||||
return df.sort_values(['Tree', 'Node']).reset_index(drop=True)
|
||||
# pylint: disable=no-member
|
||||
return df.sort(['Tree', 'Node']).reset_index(drop=True)
|
||||
|
||||
def _validate_features(self, data):
|
||||
"""
|
||||
Validate Booster and data's feature_names are identical.
|
||||
|
||||
@@ -16,7 +16,6 @@ def plot_importance(booster, ax=None, height=0.2,
|
||||
xlabel='F score', ylabel='Features',
|
||||
importance_type='weight', max_num_features=None,
|
||||
grid=True, show_values=True, **kwargs):
|
||||
|
||||
"""Plot importance based on fitted trees.
|
||||
|
||||
Parameters
|
||||
@@ -124,17 +123,17 @@ _EDGEPAT = re.compile(r'yes=(\d+),no=(\d+),missing=(\d+)')
|
||||
_EDGEPAT2 = re.compile(r'yes=(\d+),no=(\d+)')
|
||||
|
||||
|
||||
def _parse_node(graph, text):
|
||||
def _parse_node(graph, text, condition_node_params, leaf_node_params):
|
||||
"""parse dumped node"""
|
||||
match = _NODEPAT.match(text)
|
||||
if match is not None:
|
||||
node = match.group(1)
|
||||
graph.node(node, label=match.group(2), shape='circle')
|
||||
graph.node(node, label=match.group(2), **condition_node_params)
|
||||
return node
|
||||
match = _LEAFPAT.match(text)
|
||||
if match is not None:
|
||||
node = match.group(1)
|
||||
graph.node(node, label=match.group(2), shape='box')
|
||||
graph.node(node, label=match.group(2), **leaf_node_params)
|
||||
return node
|
||||
raise ValueError('Unable to parse node: {0}'.format(text))
|
||||
|
||||
@@ -164,8 +163,8 @@ def _parse_edge(graph, node, text, yes_color='#0000FF', no_color='#FF0000'):
|
||||
|
||||
|
||||
def to_graphviz(booster, fmap='', num_trees=0, rankdir='UT',
|
||||
yes_color='#0000FF', no_color='#FF0000', **kwargs):
|
||||
|
||||
yes_color='#0000FF', no_color='#FF0000',
|
||||
condition_node_params=None, leaf_node_params=None, **kwargs):
|
||||
"""Convert specified tree to graphviz instance. IPython can automatically plot the
|
||||
returned graphiz instance. Otherwise, you should call .render() method
|
||||
of the returned graphiz instance.
|
||||
@@ -184,6 +183,18 @@ def to_graphviz(booster, fmap='', num_trees=0, rankdir='UT',
|
||||
Edge color when meets the node condition.
|
||||
no_color : str, default '#FF0000'
|
||||
Edge color when doesn't meet the node condition.
|
||||
condition_node_params : dict (optional)
|
||||
condition node configuration,
|
||||
{'shape':'box',
|
||||
'style':'filled,rounded',
|
||||
'fillcolor':'#78bceb'
|
||||
}
|
||||
leaf_node_params : dict (optional)
|
||||
leaf node configuration
|
||||
{'shape':'box',
|
||||
'style':'filled',
|
||||
'fillcolor':'#e48038'
|
||||
}
|
||||
kwargs :
|
||||
Other keywords passed to graphviz graph_attr
|
||||
|
||||
@@ -192,6 +203,11 @@ def to_graphviz(booster, fmap='', num_trees=0, rankdir='UT',
|
||||
ax : matplotlib Axes
|
||||
"""
|
||||
|
||||
if condition_node_params is None:
|
||||
condition_node_params = {}
|
||||
if leaf_node_params is None:
|
||||
leaf_node_params = {}
|
||||
|
||||
try:
|
||||
from graphviz import Digraph
|
||||
except ImportError:
|
||||
@@ -212,7 +228,9 @@ def to_graphviz(booster, fmap='', num_trees=0, rankdir='UT',
|
||||
|
||||
for i, text in enumerate(tree):
|
||||
if text[0].isdigit():
|
||||
node = _parse_node(graph, text)
|
||||
node = _parse_node(
|
||||
graph, text, condition_node_params=condition_node_params,
|
||||
leaf_node_params=leaf_node_params)
|
||||
else:
|
||||
if i == 0:
|
||||
# 1st string must be node
|
||||
@@ -256,7 +274,8 @@ def plot_tree(booster, fmap='', num_trees=0, rankdir='UT', ax=None, **kwargs):
|
||||
if ax is None:
|
||||
_, ax = plt.subplots(1, 1)
|
||||
|
||||
g = to_graphviz(booster, fmap=fmap, num_trees=num_trees, rankdir=rankdir, **kwargs)
|
||||
g = to_graphviz(booster, fmap=fmap, num_trees=num_trees,
|
||||
rankdir=rankdir, **kwargs)
|
||||
|
||||
s = BytesIO()
|
||||
s.write(g.pipe(format='png'))
|
||||
|
||||
@@ -100,6 +100,9 @@ class XGBModel(XGBModelBase):
|
||||
missing : float, optional
|
||||
Value in the data which needs to be present as a missing value. If
|
||||
None, defaults to np.nan.
|
||||
importance_type: string, default "gain"
|
||||
The feature importance type for the feature_importances_ property: either "gain",
|
||||
"weight", "cover", "total_gain" or "total_cover".
|
||||
\*\*kwargs : dict, optional
|
||||
Keyword arguments for XGBoost Booster object. Full documentation of parameters can
|
||||
be found here: https://github.com/dmlc/xgboost/blob/master/doc/parameter.rst.
|
||||
@@ -133,7 +136,8 @@ class XGBModel(XGBModelBase):
|
||||
n_jobs=1, nthread=None, gamma=0, min_child_weight=1, max_delta_step=0,
|
||||
subsample=1, colsample_bytree=1, colsample_bylevel=1,
|
||||
reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
|
||||
base_score=0.5, random_state=0, seed=None, missing=None, **kwargs):
|
||||
base_score=0.5, random_state=0, seed=None, missing=None,
|
||||
importance_type="gain", **kwargs):
|
||||
if not SKLEARN_INSTALLED:
|
||||
raise XGBoostError('sklearn needs to be installed in order to use this module')
|
||||
self.max_depth = max_depth
|
||||
@@ -159,6 +163,7 @@ class XGBModel(XGBModelBase):
|
||||
self.random_state = random_state
|
||||
self.nthread = nthread
|
||||
self.n_jobs = n_jobs
|
||||
self.importance_type = importance_type
|
||||
|
||||
def __setstate__(self, state):
|
||||
# backward compatibility code
|
||||
@@ -232,7 +237,7 @@ class XGBModel(XGBModelBase):
|
||||
else:
|
||||
xgb_params['nthread'] = n_jobs
|
||||
|
||||
xgb_params['silent'] = 1 if self.silent else 0
|
||||
xgb_params['verbosity'] = 0 if self.silent else 0
|
||||
|
||||
if xgb_params['nthread'] <= 0:
|
||||
xgb_params.pop('nthread', None)
|
||||
@@ -513,12 +518,12 @@ class XGBModel(XGBModelBase):
|
||||
feature_importances_ : array of shape ``[n_features]``
|
||||
|
||||
"""
|
||||
if self.booster != 'gbtree':
|
||||
if getattr(self, 'booster', None) is not None and self.booster != 'gbtree':
|
||||
raise AttributeError('Feature importance is not defined for Booster type {}'
|
||||
.format(self.booster))
|
||||
b = self.get_booster()
|
||||
fs = b.get_fscore()
|
||||
all_features = [fs.get(f, 0.) for f in b.feature_names]
|
||||
score = b.get_score(importance_type=self.importance_type)
|
||||
all_features = [score.get(f, 0.) for f in b.feature_names]
|
||||
all_features = np.array(all_features, dtype=np.float32)
|
||||
return all_features / all_features.sum()
|
||||
|
||||
@@ -535,13 +540,21 @@ class XGBModel(XGBModelBase):
|
||||
|
||||
Returns
|
||||
-------
|
||||
coef_ : array of shape ``[n_features]``
|
||||
coef_ : array of shape ``[n_features]`` or ``[n_classes, n_features]``
|
||||
"""
|
||||
if self.booster != 'gblinear':
|
||||
if getattr(self, 'booster', None) is not None and self.booster != 'gblinear':
|
||||
raise AttributeError('Coefficients are not defined for Booster type {}'
|
||||
.format(self.booster))
|
||||
b = self.get_booster()
|
||||
return json.loads(b.get_dump(dump_format='json')[0])['weight']
|
||||
coef = np.array(json.loads(b.get_dump(dump_format='json')[0])['weight'])
|
||||
# Logic for multiclass classification
|
||||
n_classes = getattr(self, 'n_classes_', None)
|
||||
if n_classes is not None:
|
||||
if n_classes > 2:
|
||||
assert len(coef.shape) == 1
|
||||
assert coef.shape[0] % n_classes == 0
|
||||
coef = coef.reshape((n_classes, -1))
|
||||
return coef
|
||||
|
||||
@property
|
||||
def intercept_(self):
|
||||
@@ -556,13 +569,13 @@ class XGBModel(XGBModelBase):
|
||||
|
||||
Returns
|
||||
-------
|
||||
intercept_ : array of shape ``[n_features]``
|
||||
intercept_ : array of shape ``(1,)`` or ``[n_classes]``
|
||||
"""
|
||||
if self.booster != 'gblinear':
|
||||
if getattr(self, 'booster', None) is not None and self.booster != 'gblinear':
|
||||
raise AttributeError('Intercept (bias) is not defined for Booster type {}'
|
||||
.format(self.booster))
|
||||
b = self.get_booster()
|
||||
return json.loads(b.get_dump(dump_format='json')[0])['bias']
|
||||
return np.array(json.loads(b.get_dump(dump_format='json')[0])['bias'])
|
||||
|
||||
|
||||
class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
@@ -618,11 +631,11 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
early_stopping_rounds : int, optional
|
||||
Activates early stopping. Validation error needs to decrease at
|
||||
least every <early_stopping_rounds> round(s) to continue training.
|
||||
Requires at least one item in evals. If there's more than one,
|
||||
will use the last. Returns the model from the last iteration
|
||||
(not the best one). If early stopping occurs, the model will
|
||||
have three additional fields: bst.best_score, bst.best_iteration
|
||||
and bst.best_ntree_limit.
|
||||
Requires at least one item in evals. If there's more than one,
|
||||
will use the last. If early stopping occurs, the model will have
|
||||
three additional fields: bst.best_score, bst.best_iteration and
|
||||
bst.best_ntree_limit (bst.best_ntree_limit is the ntree_limit parameter
|
||||
default value in predict method if not any other value is specified).
|
||||
(Use bst.best_ntree_limit to get the correct value if num_parallel_tree
|
||||
and/or num_class appears in the parameters)
|
||||
verbose : bool
|
||||
@@ -696,7 +709,7 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
evals=evals,
|
||||
early_stopping_rounds=early_stopping_rounds,
|
||||
evals_result=evals_result, obj=obj, feval=feval,
|
||||
verbose_eval=verbose, xgb_model=None,
|
||||
verbose_eval=verbose, xgb_model=xgb_model,
|
||||
callbacks=callbacks)
|
||||
|
||||
self.objective = xgb_options["objective"]
|
||||
@@ -872,7 +885,7 @@ class XGBRanker(XGBModel):
|
||||
Whether to print messages while running boosting.
|
||||
objective : string
|
||||
Specify the learning task and the corresponding learning objective.
|
||||
Only "rank:pairwise" is supported currently.
|
||||
The objective name must start with "rank:".
|
||||
booster: string
|
||||
Specify which booster to use: gbtree, gblinear or dart.
|
||||
nthread : int
|
||||
@@ -986,13 +999,29 @@ class XGBRanker(XGBModel):
|
||||
group : array_like
|
||||
group size of training data
|
||||
sample_weight : array_like
|
||||
instance weights
|
||||
group weights
|
||||
|
||||
.. note:: Weights are per-group for ranking tasks
|
||||
|
||||
In ranking task, one weight is assigned to each group (not each data
|
||||
point). This is because we only care about the relative ordering of
|
||||
data points within each group, so it doesn't make sense to assign
|
||||
weights to individual data points.
|
||||
|
||||
eval_set : list, optional
|
||||
A list of (X, y) tuple pairs to use as a validation set for
|
||||
early-stopping
|
||||
sample_weight_eval_set : list, optional
|
||||
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of
|
||||
instance weights on the i-th validation set.
|
||||
group weights on the i-th validation set.
|
||||
|
||||
.. note:: Weights are per-group for ranking tasks
|
||||
|
||||
In ranking task, one weight is assigned to each group (not each data
|
||||
point). This is because we only care about the relative ordering of
|
||||
data points within each group, so it doesn't make sense to assign
|
||||
weights to individual data points.
|
||||
|
||||
eval_group : list of arrays, optional
|
||||
A list that contains the group size corresponds to each
|
||||
(X, y) pair in eval_set
|
||||
|
||||
2
rabit
2
rabit
Submodule rabit updated: eb2590b774...1cc34f01db
@@ -19,7 +19,7 @@
|
||||
#include <cstdio>
|
||||
#include <cstring>
|
||||
#include <vector>
|
||||
#include "./common/sync.h"
|
||||
#include "./common/common.h"
|
||||
#include "./common/config.h"
|
||||
|
||||
|
||||
@@ -34,8 +34,6 @@ enum CLITask {
|
||||
struct CLIParam : public dmlc::Parameter<CLIParam> {
|
||||
/*! \brief the task name */
|
||||
int task;
|
||||
/*! \brief whether silent */
|
||||
int silent;
|
||||
/*! \brief whether evaluate training statistics */
|
||||
bool eval_train;
|
||||
/*! \brief number of boosting iterations */
|
||||
@@ -83,8 +81,6 @@ struct CLIParam : public dmlc::Parameter<CLIParam> {
|
||||
.add_enum("dump", kDumpModel)
|
||||
.add_enum("pred", kPredict)
|
||||
.describe("Task to be performed by the CLI program.");
|
||||
DMLC_DECLARE_FIELD(silent).set_default(0).set_range(0, 2)
|
||||
.describe("Silent level during the task.");
|
||||
DMLC_DECLARE_FIELD(eval_train).set_default(false)
|
||||
.describe("Whether evaluate on training data during training.");
|
||||
DMLC_DECLARE_FIELD(num_round).set_default(10).set_lower_bound(1)
|
||||
@@ -126,10 +122,10 @@ struct CLIParam : public dmlc::Parameter<CLIParam> {
|
||||
DMLC_DECLARE_ALIAS(name_fmap, fmap);
|
||||
}
|
||||
// customized configure function of CLIParam
|
||||
inline void Configure(const std::vector<std::pair<std::string, std::string> >& cfg) {
|
||||
this->cfg = cfg;
|
||||
this->InitAllowUnknown(cfg);
|
||||
for (const auto& kv : cfg) {
|
||||
inline void Configure(const std::vector<std::pair<std::string, std::string> >& _cfg) {
|
||||
this->cfg = _cfg;
|
||||
this->InitAllowUnknown(_cfg);
|
||||
for (const auto& kv : _cfg) {
|
||||
if (!strncmp("eval[", kv.first.c_str(), 5)) {
|
||||
char evname[256];
|
||||
CHECK_EQ(sscanf(kv.first.c_str(), "eval[%[^]]", evname), 1)
|
||||
@@ -141,13 +137,13 @@ struct CLIParam : public dmlc::Parameter<CLIParam> {
|
||||
// constraint.
|
||||
if (name_pred == "stdout") {
|
||||
save_period = 0;
|
||||
silent = 1;
|
||||
this->cfg.emplace_back(std::make_pair("silent", "0"));
|
||||
}
|
||||
if (dsplit == 0 && rabit::IsDistributed()) {
|
||||
dsplit = 2;
|
||||
}
|
||||
if (rabit::GetRank() != 0) {
|
||||
silent = 2;
|
||||
this->cfg.emplace_back(std::make_pair("silent", "1"));
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -162,15 +158,20 @@ void CLITrain(const CLIParam& param) {
|
||||
}
|
||||
// load in data.
|
||||
std::shared_ptr<DMatrix> dtrain(
|
||||
DMatrix::Load(param.train_path, param.silent != 0, param.dsplit == 2));
|
||||
DMatrix::Load(
|
||||
param.train_path,
|
||||
ConsoleLogger::GlobalVerbosity() > ConsoleLogger::DefaultVerbosity(),
|
||||
param.dsplit == 2));
|
||||
std::vector<std::shared_ptr<DMatrix> > deval;
|
||||
std::vector<std::shared_ptr<DMatrix> > cache_mats;
|
||||
std::vector<DMatrix*> eval_datasets;
|
||||
cache_mats.push_back(dtrain);
|
||||
for (size_t i = 0; i < param.eval_data_names.size(); ++i) {
|
||||
deval.emplace_back(
|
||||
std::shared_ptr<DMatrix>(DMatrix::Load(param.eval_data_paths[i],
|
||||
param.silent != 0, param.dsplit == 2)));
|
||||
std::shared_ptr<DMatrix>(DMatrix::Load(
|
||||
param.eval_data_paths[i],
|
||||
ConsoleLogger::GlobalVerbosity() > ConsoleLogger::DefaultVerbosity(),
|
||||
param.dsplit == 2)));
|
||||
eval_datasets.push_back(deval.back().get());
|
||||
cache_mats.push_back(deval.back());
|
||||
}
|
||||
@@ -194,17 +195,14 @@ void CLITrain(const CLIParam& param) {
|
||||
learner->InitModel();
|
||||
}
|
||||
}
|
||||
if (param.silent == 0) {
|
||||
LOG(INFO) << "Loading data: " << dmlc::GetTime() - tstart_data_load << " sec";
|
||||
}
|
||||
LOG(INFO) << "Loading data: " << dmlc::GetTime() - tstart_data_load << " sec";
|
||||
|
||||
// start training.
|
||||
const double start = dmlc::GetTime();
|
||||
for (int i = version / 2; i < param.num_round; ++i) {
|
||||
double elapsed = dmlc::GetTime() - start;
|
||||
if (version % 2 == 0) {
|
||||
if (param.silent == 0) {
|
||||
LOG(CONSOLE) << "boosting round " << i << ", " << elapsed << " sec elapsed";
|
||||
}
|
||||
LOG(INFO) << "boosting round " << i << ", " << elapsed << " sec elapsed";
|
||||
learner->UpdateOneIter(i, dtrain.get());
|
||||
if (learner->AllowLazyCheckPoint()) {
|
||||
rabit::LazyCheckPoint(learner.get());
|
||||
@@ -220,9 +218,7 @@ void CLITrain(const CLIParam& param) {
|
||||
LOG(TRACKER) << res;
|
||||
}
|
||||
} else {
|
||||
if (param.silent < 2) {
|
||||
LOG(CONSOLE) << res;
|
||||
}
|
||||
LOG(CONSOLE) << res;
|
||||
}
|
||||
if (param.save_period != 0 &&
|
||||
(i + 1) % param.save_period == 0 &&
|
||||
@@ -261,10 +257,8 @@ void CLITrain(const CLIParam& param) {
|
||||
learner->Save(fo.get());
|
||||
}
|
||||
|
||||
if (param.silent == 0) {
|
||||
double elapsed = dmlc::GetTime() - start;
|
||||
LOG(CONSOLE) << "update end, " << elapsed << " sec in all";
|
||||
}
|
||||
double elapsed = dmlc::GetTime() - start;
|
||||
LOG(INFO) << "update end, " << elapsed << " sec in all";
|
||||
}
|
||||
|
||||
void CLIDumpModel(const CLIParam& param) {
|
||||
@@ -311,7 +305,10 @@ void CLIPredict(const CLIParam& param) {
|
||||
<< "Test dataset parameter test:data must be specified.";
|
||||
// load data
|
||||
std::unique_ptr<DMatrix> dtest(
|
||||
DMatrix::Load(param.test_path, param.silent != 0, param.dsplit == 2));
|
||||
DMatrix::Load(
|
||||
param.test_path,
|
||||
ConsoleLogger::GlobalVerbosity() > ConsoleLogger::DefaultVerbosity(),
|
||||
param.dsplit == 2));
|
||||
// load model
|
||||
CHECK_NE(param.model_in, "NULL")
|
||||
<< "Must specify model_in for predict";
|
||||
@@ -321,14 +318,11 @@ void CLIPredict(const CLIParam& param) {
|
||||
learner->Load(fi.get());
|
||||
learner->Configure(param.cfg);
|
||||
|
||||
if (param.silent == 0) {
|
||||
LOG(CONSOLE) << "start prediction...";
|
||||
}
|
||||
LOG(INFO) << "start prediction...";
|
||||
HostDeviceVector<bst_float> preds;
|
||||
learner->Predict(dtest.get(), param.pred_margin, &preds, param.ntree_limit);
|
||||
if (param.silent == 0) {
|
||||
LOG(CONSOLE) << "writing prediction to " << param.name_pred;
|
||||
}
|
||||
LOG(CONSOLE) << "writing prediction to " << param.name_pred;
|
||||
|
||||
std::unique_ptr<dmlc::Stream> fo(
|
||||
dmlc::Stream::Create(param.name_pred.c_str(), "w"));
|
||||
dmlc::ostream os(fo.get());
|
||||
|
||||
@@ -27,6 +27,6 @@ GlobalRandomEngine& GlobalRandom() {
|
||||
int AllVisibleImpl::AllVisible() {
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
#endif // !defined(XGBOOST_USE_CUDA)
|
||||
|
||||
} // namespace xgboost
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
|
||||
#define WITH_CUDA() false
|
||||
|
||||
#endif
|
||||
#endif // defined(__CUDACC__)
|
||||
|
||||
namespace dh {
|
||||
#if defined(__CUDACC__)
|
||||
@@ -44,7 +44,7 @@ inline cudaError_t ThrowOnCudaError(cudaError_t code, const char *file,
|
||||
}
|
||||
return code;
|
||||
}
|
||||
#endif
|
||||
#endif // defined(__CUDACC__)
|
||||
} // namespace dh
|
||||
|
||||
namespace xgboost {
|
||||
@@ -147,61 +147,86 @@ struct AllVisibleImpl {
|
||||
*/
|
||||
class GPUSet {
|
||||
public:
|
||||
using GpuIdType = int;
|
||||
static constexpr GpuIdType kAll = -1;
|
||||
|
||||
explicit GPUSet(int start = 0, int ndevices = 0)
|
||||
: devices_(start, start + ndevices) {}
|
||||
|
||||
static GPUSet Empty() { return GPUSet(); }
|
||||
|
||||
static GPUSet Range(int start, int ndevices) {
|
||||
return ndevices <= 0 ? Empty() : GPUSet{start, ndevices};
|
||||
static GPUSet Range(GpuIdType start, GpuIdType n_gpus) {
|
||||
return n_gpus <= 0 ? Empty() : GPUSet{start, n_gpus};
|
||||
}
|
||||
/*! \brief ndevices and num_rows both are upper bounds. */
|
||||
static GPUSet All(int ndevices, int num_rows = std::numeric_limits<int>::max()) {
|
||||
int n_devices_visible = AllVisible().Size();
|
||||
if (ndevices < 0 || ndevices > n_devices_visible) {
|
||||
ndevices = n_devices_visible;
|
||||
/*! \brief n_gpus and num_rows both are upper bounds. */
|
||||
static GPUSet All(GpuIdType gpu_id, GpuIdType n_gpus,
|
||||
GpuIdType num_rows = std::numeric_limits<GpuIdType>::max()) {
|
||||
CHECK_GE(gpu_id, 0) << "gpu_id must be >= 0.";
|
||||
CHECK_GE(n_gpus, -1) << "n_gpus must be >= -1.";
|
||||
|
||||
GpuIdType const n_devices_visible = AllVisible().Size();
|
||||
if (n_devices_visible == 0 || n_gpus == 0) { return Empty(); }
|
||||
|
||||
GpuIdType const n_available_devices = n_devices_visible - gpu_id;
|
||||
|
||||
if (n_gpus == kAll) { // Use all devices starting from `gpu_id'.
|
||||
CHECK(gpu_id < n_devices_visible)
|
||||
<< "\ngpu_id should be less than number of visible devices.\ngpu_id: "
|
||||
<< gpu_id
|
||||
<< ", number of visible devices: "
|
||||
<< n_devices_visible;
|
||||
GpuIdType n_devices =
|
||||
n_available_devices < num_rows ? n_available_devices : num_rows;
|
||||
return Range(gpu_id, n_devices);
|
||||
} else { // Use devices in ( gpu_id, gpu_id + n_gpus ).
|
||||
CHECK_LE(n_gpus, n_available_devices)
|
||||
<< "Starting from gpu id: " << gpu_id << ", there are only "
|
||||
<< n_available_devices << " available devices, while n_gpus is set to: "
|
||||
<< n_gpus;
|
||||
GpuIdType n_devices = n_gpus < num_rows ? n_gpus : num_rows;
|
||||
return Range(gpu_id, n_devices);
|
||||
}
|
||||
// fix-up device number to be limited by number of rows
|
||||
ndevices = ndevices > num_rows ? num_rows : ndevices;
|
||||
return Range(0, ndevices);
|
||||
}
|
||||
static GPUSet AllVisible() {
|
||||
int n = AllVisibleImpl::AllVisible();
|
||||
return Range(0, n);
|
||||
}
|
||||
/*! \brief Ensure gpu_id is correct, so not dependent upon user knowing details */
|
||||
static int GetDeviceIdx(int gpu_id) {
|
||||
auto devices = AllVisible();
|
||||
CHECK(!devices.IsEmpty()) << "Empty device.";
|
||||
return (std::abs(gpu_id) + 0) % devices.Size();
|
||||
}
|
||||
/*! \brief Counting from gpu_id */
|
||||
GPUSet Normalised(int gpu_id) const {
|
||||
return Range(gpu_id, Size());
|
||||
}
|
||||
/*! \brief Counting from 0 */
|
||||
GPUSet Unnormalised() const {
|
||||
return Range(0, Size());
|
||||
}
|
||||
|
||||
int Size() const {
|
||||
int res = *devices_.end() - *devices_.begin();
|
||||
return res < 0 ? 0 : res;
|
||||
static GPUSet AllVisible() {
|
||||
GpuIdType n = AllVisibleImpl::AllVisible();
|
||||
return Range(0, n);
|
||||
}
|
||||
/*! \brief Get normalised device id. */
|
||||
int operator[](int index) const {
|
||||
CHECK(index >= 0 && index < Size());
|
||||
return *devices_.begin() + index;
|
||||
|
||||
size_t Size() const {
|
||||
GpuIdType size = *devices_.end() - *devices_.begin();
|
||||
GpuIdType res = size < 0 ? 0 : size;
|
||||
return static_cast<size_t>(res);
|
||||
}
|
||||
|
||||
/*
|
||||
* By default, we have two configurations of identifying device, one
|
||||
* is the device id obtained from `cudaGetDevice'. But we sometimes
|
||||
* store objects that allocated one for each device in a list, which
|
||||
* requires a zero-based index.
|
||||
*
|
||||
* Hence, `DeviceId' converts a zero-based index to actual device id,
|
||||
* `Index' converts a device id to a zero-based index.
|
||||
*/
|
||||
GpuIdType DeviceId(size_t index) const {
|
||||
GpuIdType result = *devices_.begin() + static_cast<GpuIdType>(index);
|
||||
CHECK(Contains(result)) << "\nDevice " << result << " is not in GPUSet."
|
||||
<< "\nIndex: " << index
|
||||
<< "\nGPUSet: (" << *begin() << ", " << *end() << ")"
|
||||
<< std::endl;
|
||||
return result;
|
||||
}
|
||||
size_t Index(GpuIdType device) const {
|
||||
CHECK(Contains(device)) << "\nDevice " << device << " is not in GPUSet."
|
||||
<< "\nGPUSet: (" << *begin() << ", " << *end() << ")"
|
||||
<< std::endl;
|
||||
size_t result = static_cast<size_t>(device - *devices_.begin());
|
||||
return result;
|
||||
}
|
||||
|
||||
bool IsEmpty() const { return Size() == 0; }
|
||||
/*! \brief Get un-normalised index. */
|
||||
int Index(int device) const {
|
||||
CHECK(Contains(device));
|
||||
return device - *devices_.begin();
|
||||
}
|
||||
|
||||
bool Contains(int device) const {
|
||||
bool Contains(GpuIdType device) const {
|
||||
return *devices_.begin() <= device && device < *devices_.end();
|
||||
}
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
|
||||
#ifdef __CUDACC__
|
||||
#include "device_helpers.cuh"
|
||||
#endif
|
||||
#endif // __CUDACC__
|
||||
|
||||
namespace xgboost {
|
||||
namespace common {
|
||||
@@ -115,7 +115,7 @@ class CompressedBufferWriter {
|
||||
symbol >>= 8;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#endif // __CUDACC__
|
||||
|
||||
template <typename IterT>
|
||||
void Write(CompressedByteT *buffer, IterT input_begin, IterT input_end) {
|
||||
|
||||
@@ -58,12 +58,12 @@ class ConfigReaderBase {
|
||||
* \brief to be implemented by subclass,
|
||||
* get next token, return EOF if end of file
|
||||
*/
|
||||
virtual char GetChar() = 0;
|
||||
virtual int GetChar() = 0;
|
||||
/*! \brief to be implemented by child, check if end of stream */
|
||||
virtual bool IsEnd() = 0;
|
||||
|
||||
private:
|
||||
char ch_buf_;
|
||||
int ch_buf_;
|
||||
std::string s_name_, s_val_, s_buf_;
|
||||
|
||||
inline void SkipLine() {
|
||||
@@ -79,7 +79,7 @@ class ConfigReaderBase {
|
||||
case '\"': return;
|
||||
case '\r':
|
||||
case '\n': LOG(FATAL)<< "ConfigReader: unterminated string";
|
||||
default: *tok += ch_buf_;
|
||||
default: *tok += static_cast<char>(ch_buf_);
|
||||
}
|
||||
}
|
||||
LOG(FATAL) << "ConfigReader: unterminated string";
|
||||
@@ -89,7 +89,7 @@ class ConfigReaderBase {
|
||||
switch (ch_buf_) {
|
||||
case '\\': *tok += this->GetChar(); break;
|
||||
case '\'': return;
|
||||
default: *tok += ch_buf_;
|
||||
default: *tok += static_cast<char>(ch_buf_);
|
||||
}
|
||||
}
|
||||
LOG(FATAL) << "unterminated string";
|
||||
@@ -128,7 +128,7 @@ class ConfigReaderBase {
|
||||
if (tok->length() != 0) return new_line;
|
||||
break;
|
||||
default:
|
||||
*tok += ch_buf_;
|
||||
*tok += static_cast<char>(ch_buf_);
|
||||
ch_buf_ = this->GetChar();
|
||||
break;
|
||||
}
|
||||
@@ -152,7 +152,7 @@ class ConfigStreamReader: public ConfigReaderBase {
|
||||
explicit ConfigStreamReader(std::istream &fin) : fin_(fin) {}
|
||||
|
||||
protected:
|
||||
char GetChar() override {
|
||||
int GetChar() override {
|
||||
return fin_.get();
|
||||
}
|
||||
/*! \brief to be implemented by child, check if end of stream */
|
||||
|
||||
@@ -19,9 +19,11 @@
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "timer.h"
|
||||
|
||||
#ifdef XGBOOST_USE_NCCL
|
||||
#include "nccl.h"
|
||||
#include "../common/io.h"
|
||||
#endif
|
||||
|
||||
// Uncomment to enable
|
||||
@@ -53,6 +55,16 @@ T *Raw(thrust::device_vector<T> &v) { // NOLINT
|
||||
return raw_pointer_cast(v.data());
|
||||
}
|
||||
|
||||
inline void CudaCheckPointerDevice(void* ptr) {
|
||||
cudaPointerAttributes attr;
|
||||
dh::safe_cuda(cudaPointerGetAttributes(&attr, ptr));
|
||||
int ptr_device = attr.device;
|
||||
int cur_device = -1;
|
||||
cudaGetDevice(&cur_device);
|
||||
CHECK_EQ(ptr_device, cur_device) << "pointer device: " << ptr_device
|
||||
<< "current device: " << cur_device;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
const T *Raw(const thrust::device_vector<T> &v) { // NOLINT
|
||||
return raw_pointer_cast(v.data());
|
||||
@@ -61,7 +73,7 @@ const T *Raw(const thrust::device_vector<T> &v) { // NOLINT
|
||||
// if n_devices=-1, then use all visible devices
|
||||
inline void SynchronizeNDevices(xgboost::GPUSet devices) {
|
||||
devices = devices.IsEmpty() ? xgboost::GPUSet::AllVisible() : devices;
|
||||
for (auto const d : devices.Unnormalised()) {
|
||||
for (auto const d : devices) {
|
||||
safe_cuda(cudaSetDevice(d));
|
||||
safe_cuda(cudaDeviceSynchronize());
|
||||
}
|
||||
@@ -247,6 +259,14 @@ class DVec {
|
||||
|
||||
const T *Data() const { return ptr_; }
|
||||
|
||||
xgboost::common::Span<const T> GetSpan() const {
|
||||
return xgboost::common::Span<const T>(ptr_, this->Size());
|
||||
}
|
||||
|
||||
xgboost::common::Span<T> GetSpan() {
|
||||
return xgboost::common::Span<T>(ptr_, this->Size());
|
||||
}
|
||||
|
||||
std::vector<T> AsVector() const {
|
||||
std::vector<T> h_vector(Size());
|
||||
safe_cuda(cudaSetDevice(device_idx_));
|
||||
@@ -359,6 +379,11 @@ class DVec2 {
|
||||
DVec<T> &D2() { return d2_; }
|
||||
|
||||
T *Current() { return buff_.Current(); }
|
||||
xgboost::common::Span<T> CurrentSpan() {
|
||||
return xgboost::common::Span<T>{
|
||||
buff_.Current(),
|
||||
static_cast<typename xgboost::common::Span<T>::index_type>(Size())};
|
||||
}
|
||||
|
||||
DVec<T> &CurrentDVec() { return buff_.selector == 0 ? D1() : D2(); }
|
||||
|
||||
@@ -461,7 +486,7 @@ class BulkAllocator {
|
||||
}
|
||||
|
||||
template <typename... Args>
|
||||
void Allocate(int device_idx, bool silent, Args... args) {
|
||||
void Allocate(int device_idx, Args... args) {
|
||||
size_t size = GetSizeBytes(args...);
|
||||
|
||||
char *ptr = AllocateDevice(device_idx, size, MemoryT);
|
||||
@@ -487,8 +512,9 @@ struct CubMemory {
|
||||
~CubMemory() { Free(); }
|
||||
|
||||
template <typename T>
|
||||
T *Pointer() {
|
||||
return static_cast<T *>(d_temp_storage);
|
||||
xgboost::common::Span<T> GetSpan(size_t size) {
|
||||
this->LazyAllocate(size * sizeof(T));
|
||||
return xgboost::common::Span<T>(static_cast<T*>(d_temp_storage), size);
|
||||
}
|
||||
|
||||
void Free() {
|
||||
@@ -743,10 +769,12 @@ void SumReduction(dh::CubMemory &tmp_mem, dh::DVec<T> &in, dh::DVec<T> &out,
|
||||
* @param nVals number of elements in the input array
|
||||
*/
|
||||
template <typename T>
|
||||
typename std::iterator_traits<T>::value_type SumReduction(dh::CubMemory &tmp_mem, T in, int nVals) {
|
||||
typename std::iterator_traits<T>::value_type SumReduction(
|
||||
dh::CubMemory &tmp_mem, T in, int nVals) {
|
||||
using ValueT = typename std::iterator_traits<T>::value_type;
|
||||
size_t tmpSize;
|
||||
dh::safe_cuda(cub::DeviceReduce::Sum(nullptr, tmpSize, in, in, nVals));
|
||||
ValueT *dummy_out = nullptr;
|
||||
dh::safe_cuda(cub::DeviceReduce::Sum(nullptr, tmpSize, in, dummy_out, nVals));
|
||||
// Allocate small extra memory for the return value
|
||||
tmp_mem.LazyAllocate(tmpSize + sizeof(ValueT));
|
||||
auto ptr = reinterpret_cast<ValueT *>(tmp_mem.d_temp_storage) + 1;
|
||||
@@ -769,7 +797,7 @@ typename std::iterator_traits<T>::value_type SumReduction(dh::CubMemory &tmp_mem
|
||||
template <typename T, int BlkDim = 256, int ItemsPerThread = 4>
|
||||
void FillConst(int device_idx, T *out, int len, T def) {
|
||||
dh::LaunchN<ItemsPerThread, BlkDim>(device_idx, len,
|
||||
[=] __device__(int i) { out[i] = def; });
|
||||
[=] __device__(int i) { out[i] = def; });
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -819,14 +847,20 @@ void Gather(int device_idx, T *out, const T *in, const int *instId, int nVals) {
|
||||
*/
|
||||
|
||||
class AllReducer {
|
||||
bool initialised;
|
||||
bool initialised_;
|
||||
size_t allreduce_bytes_; // Keep statistics of the number of bytes communicated
|
||||
size_t allreduce_calls_; // Keep statistics of the number of reduce calls
|
||||
#ifdef XGBOOST_USE_NCCL
|
||||
std::vector<ncclComm_t> comms;
|
||||
std::vector<cudaStream_t> streams;
|
||||
std::vector<int> device_ordinals;
|
||||
std::vector<int> device_ordinals; // device id from CUDA
|
||||
std::vector<int> device_counts; // device count from CUDA
|
||||
ncclUniqueId id;
|
||||
#endif
|
||||
|
||||
public:
|
||||
AllReducer() : initialised(false) {}
|
||||
AllReducer() : initialised_(false), allreduce_bytes_(0),
|
||||
allreduce_calls_(0) {}
|
||||
|
||||
/**
|
||||
* \fn void Init(const std::vector<int> &device_ordinals)
|
||||
@@ -839,17 +873,45 @@ class AllReducer {
|
||||
|
||||
void Init(const std::vector<int> &device_ordinals) {
|
||||
#ifdef XGBOOST_USE_NCCL
|
||||
/** \brief this >monitor . init. */
|
||||
this->device_ordinals = device_ordinals;
|
||||
comms.resize(device_ordinals.size());
|
||||
dh::safe_nccl(ncclCommInitAll(comms.data(),
|
||||
static_cast<int>(device_ordinals.size()),
|
||||
device_ordinals.data()));
|
||||
streams.resize(device_ordinals.size());
|
||||
for (size_t i = 0; i < device_ordinals.size(); i++) {
|
||||
safe_cuda(cudaSetDevice(device_ordinals[i]));
|
||||
safe_cuda(cudaStreamCreate(&streams[i]));
|
||||
this->device_counts.resize(rabit::GetWorldSize());
|
||||
this->comms.resize(device_ordinals.size());
|
||||
this->streams.resize(device_ordinals.size());
|
||||
this->id = GetUniqueId();
|
||||
|
||||
device_counts.at(rabit::GetRank()) = device_ordinals.size();
|
||||
for (size_t i = 0; i < device_counts.size(); i++) {
|
||||
int dev_count = device_counts.at(i);
|
||||
rabit::Allreduce<rabit::op::Sum, int>(&dev_count, 1);
|
||||
device_counts.at(i) = dev_count;
|
||||
}
|
||||
initialised = true;
|
||||
|
||||
int nccl_rank = 0;
|
||||
int nccl_rank_offset = std::accumulate(device_counts.begin(),
|
||||
device_counts.begin() + rabit::GetRank(), 0);
|
||||
int nccl_nranks = std::accumulate(device_counts.begin(),
|
||||
device_counts.end(), 0);
|
||||
nccl_rank += nccl_rank_offset;
|
||||
|
||||
GroupStart();
|
||||
for (size_t i = 0; i < device_ordinals.size(); i++) {
|
||||
int dev = device_ordinals.at(i);
|
||||
dh::safe_cuda(cudaSetDevice(dev));
|
||||
dh::safe_nccl(ncclCommInitRank(
|
||||
&comms.at(i),
|
||||
nccl_nranks, id,
|
||||
nccl_rank));
|
||||
|
||||
nccl_rank++;
|
||||
}
|
||||
GroupEnd();
|
||||
|
||||
for (size_t i = 0; i < device_ordinals.size(); i++) {
|
||||
safe_cuda(cudaSetDevice(device_ordinals.at(i)));
|
||||
safe_cuda(cudaStreamCreate(&streams.at(i)));
|
||||
}
|
||||
initialised_ = true;
|
||||
#else
|
||||
CHECK_EQ(device_ordinals.size(), 1)
|
||||
<< "XGBoost must be compiled with NCCL to use more than one GPU.";
|
||||
@@ -857,7 +919,7 @@ class AllReducer {
|
||||
}
|
||||
~AllReducer() {
|
||||
#ifdef XGBOOST_USE_NCCL
|
||||
if (initialised) {
|
||||
if (initialised_) {
|
||||
for (auto &stream : streams) {
|
||||
dh::safe_cuda(cudaStreamDestroy(stream));
|
||||
}
|
||||
@@ -865,6 +927,11 @@ class AllReducer {
|
||||
ncclCommDestroy(comm);
|
||||
}
|
||||
}
|
||||
if (xgboost::ConsoleLogger::ShouldLog(xgboost::ConsoleLogger::LV::kDebug)) {
|
||||
LOG(CONSOLE) << "======== NCCL Statistics========";
|
||||
LOG(CONSOLE) << "AllReduce calls: " << allreduce_calls_;
|
||||
LOG(CONSOLE) << "AllReduce total MB communicated: " << allreduce_bytes_/1000000;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -899,12 +966,42 @@ class AllReducer {
|
||||
void AllReduceSum(int communication_group_idx, const double *sendbuff,
|
||||
double *recvbuff, int count) {
|
||||
#ifdef XGBOOST_USE_NCCL
|
||||
CHECK(initialised);
|
||||
|
||||
dh::safe_cuda(cudaSetDevice(device_ordinals[communication_group_idx]));
|
||||
CHECK(initialised_);
|
||||
dh::safe_cuda(cudaSetDevice(device_ordinals.at(communication_group_idx)));
|
||||
dh::safe_nccl(ncclAllReduce(sendbuff, recvbuff, count, ncclDouble, ncclSum,
|
||||
comms[communication_group_idx],
|
||||
streams[communication_group_idx]));
|
||||
comms.at(communication_group_idx),
|
||||
streams.at(communication_group_idx)));
|
||||
if(communication_group_idx == 0)
|
||||
{
|
||||
allreduce_bytes_ += count * sizeof(double);
|
||||
allreduce_calls_ += 1;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* \brief Allreduce. Use in exactly the same way as NCCL but without needing
|
||||
* streams or comms.
|
||||
*
|
||||
* \param communication_group_idx Zero-based index of the communication group.
|
||||
* \param sendbuff The sendbuff.
|
||||
* \param recvbuff The recvbuff.
|
||||
* \param count Number of elements.
|
||||
*/
|
||||
|
||||
void AllReduceSum(int communication_group_idx, const float *sendbuff,
|
||||
float *recvbuff, int count) {
|
||||
#ifdef XGBOOST_USE_NCCL
|
||||
CHECK(initialised_);
|
||||
dh::safe_cuda(cudaSetDevice(device_ordinals.at(communication_group_idx)));
|
||||
dh::safe_nccl(ncclAllReduce(sendbuff, recvbuff, count, ncclFloat, ncclSum,
|
||||
comms.at(communication_group_idx),
|
||||
streams.at(communication_group_idx)));
|
||||
if(communication_group_idx == 0)
|
||||
{
|
||||
allreduce_bytes_ += count * sizeof(float);
|
||||
allreduce_calls_ += 1;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -922,7 +1019,7 @@ class AllReducer {
|
||||
void AllReduceSum(int communication_group_idx, const int64_t *sendbuff,
|
||||
int64_t *recvbuff, int count) {
|
||||
#ifdef XGBOOST_USE_NCCL
|
||||
CHECK(initialised);
|
||||
CHECK(initialised_);
|
||||
|
||||
dh::safe_cuda(cudaSetDevice(device_ordinals[communication_group_idx]));
|
||||
dh::safe_nccl(ncclAllReduce(sendbuff, recvbuff, count, ncclInt64, ncclSum,
|
||||
@@ -938,12 +1035,35 @@ class AllReducer {
|
||||
*/
|
||||
void Synchronize() {
|
||||
#ifdef XGBOOST_USE_NCCL
|
||||
for (int i = 0; i < device_ordinals.size(); i++) {
|
||||
for (size_t i = 0; i < device_ordinals.size(); i++) {
|
||||
dh::safe_cuda(cudaSetDevice(device_ordinals[i]));
|
||||
dh::safe_cuda(cudaStreamSynchronize(streams[i]));
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
#ifdef XGBOOST_USE_NCCL
|
||||
/**
|
||||
* \fn ncclUniqueId GetUniqueId()
|
||||
*
|
||||
* \brief Gets the Unique ID from NCCL to be used in setting up interprocess
|
||||
* communication
|
||||
*
|
||||
* \return the Unique ID
|
||||
*/
|
||||
ncclUniqueId GetUniqueId() {
|
||||
static const int RootRank = 0;
|
||||
ncclUniqueId id;
|
||||
if (rabit::GetRank() == RootRank) {
|
||||
dh::safe_nccl(ncclGetUniqueId(&id));
|
||||
}
|
||||
rabit::Broadcast(
|
||||
(void*)&id,
|
||||
(size_t)sizeof(ncclUniqueId),
|
||||
(int)RootRank);
|
||||
return id;
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
class SaveCudaContext {
|
||||
@@ -969,27 +1089,6 @@ class SaveCudaContext {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* \brief Executes some operation on each element of the input vector, using a
|
||||
* single controlling thread for each element.
|
||||
*
|
||||
* \tparam T Generic type parameter.
|
||||
* \tparam FunctionT Type of the function t.
|
||||
* \param shards The shards.
|
||||
* \param f The func_t to process.
|
||||
*/
|
||||
|
||||
template <typename T, typename FunctionT>
|
||||
void ExecuteShards(std::vector<T> *shards, FunctionT f) {
|
||||
SaveCudaContext {
|
||||
[&](){
|
||||
#pragma omp parallel for schedule(static, 1) if (shards->size() > 1)
|
||||
for (int shard = 0; shard < shards->size(); ++shard) {
|
||||
f(shards->at(shard));
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
/**
|
||||
* \brief Executes some operation on each element of the input vector, using a
|
||||
* single controlling thread for each element. In addition, passes the shard index
|
||||
@@ -1003,13 +1102,13 @@ void ExecuteShards(std::vector<T> *shards, FunctionT f) {
|
||||
|
||||
template <typename T, typename FunctionT>
|
||||
void ExecuteIndexShards(std::vector<T> *shards, FunctionT f) {
|
||||
SaveCudaContext {
|
||||
[&](){
|
||||
#pragma omp parallel for schedule(static, 1) if (shards->size() > 1)
|
||||
for (int shard = 0; shard < shards->size(); ++shard) {
|
||||
f(shard, shards->at(shard));
|
||||
}
|
||||
}};
|
||||
SaveCudaContext{[&]() {
|
||||
const long shards_size = static_cast<long>(shards->size());
|
||||
#pragma omp parallel for schedule(static, 1) if (shards_size > 1)
|
||||
for (long shard = 0; shard < shards_size; ++shard) {
|
||||
f(shard, shards->at(shard));
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1055,4 +1154,71 @@ xgboost::common::Span<T> ToSpan(thrust::device_vector<T>& vec,
|
||||
using IndexT = typename xgboost::common::Span<T>::index_type;
|
||||
return ToSpan(vec, static_cast<IndexT>(offset), static_cast<IndexT>(size));
|
||||
}
|
||||
|
||||
template <typename FunctionT>
|
||||
class LauncherItr {
|
||||
public:
|
||||
int idx;
|
||||
FunctionT f;
|
||||
XGBOOST_DEVICE LauncherItr() : idx(0) {}
|
||||
XGBOOST_DEVICE LauncherItr(int idx, FunctionT f) : idx(idx), f(f) {}
|
||||
XGBOOST_DEVICE LauncherItr &operator=(int output) {
|
||||
f(idx, output);
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* \brief Thrust compatible iterator type - discards algorithm output and launches device lambda
|
||||
* with the index of the output and the algorithm output as arguments.
|
||||
*
|
||||
* \author Rory
|
||||
* \date 7/9/2017
|
||||
*
|
||||
* \tparam FunctionT Type of the function t.
|
||||
*/
|
||||
template <typename FunctionT>
|
||||
class DiscardLambdaItr {
|
||||
public:
|
||||
// Required iterator traits
|
||||
using self_type = DiscardLambdaItr; // NOLINT
|
||||
using difference_type = ptrdiff_t; // NOLINT
|
||||
using value_type = void; // NOLINT
|
||||
using pointer = value_type *; // NOLINT
|
||||
using reference = LauncherItr<FunctionT>; // NOLINT
|
||||
using iterator_category = typename thrust::detail::iterator_facade_category<
|
||||
thrust::any_system_tag, thrust::random_access_traversal_tag, value_type,
|
||||
reference>::type; // NOLINT
|
||||
private:
|
||||
difference_type offset_;
|
||||
FunctionT f_;
|
||||
public:
|
||||
XGBOOST_DEVICE explicit DiscardLambdaItr(FunctionT f) : offset_(0), f_(f) {}
|
||||
XGBOOST_DEVICE DiscardLambdaItr(difference_type offset, FunctionT f)
|
||||
: offset_(offset), f_(f) {}
|
||||
XGBOOST_DEVICE self_type operator+(const int &b) const {
|
||||
return DiscardLambdaItr(offset_ + b, f_);
|
||||
}
|
||||
XGBOOST_DEVICE self_type operator++() {
|
||||
offset_++;
|
||||
return *this;
|
||||
}
|
||||
XGBOOST_DEVICE self_type operator++(int) {
|
||||
self_type retval = *this;
|
||||
offset_++;
|
||||
return retval;
|
||||
}
|
||||
XGBOOST_DEVICE self_type &operator+=(const int &b) {
|
||||
offset_ += b;
|
||||
return *this;
|
||||
}
|
||||
XGBOOST_DEVICE reference operator*() const {
|
||||
return LauncherItr<FunctionT>(offset_, f_);
|
||||
}
|
||||
XGBOOST_DEVICE reference operator[](int idx) {
|
||||
self_type offset = (*this) + idx;
|
||||
return *offset;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace dh
|
||||
|
||||
@@ -23,7 +23,7 @@ namespace common {
|
||||
* \tparam ValueType type of entries in the sparse matrix
|
||||
* \tparam SizeType type of the index range holder
|
||||
*/
|
||||
template<typename ValueType, typename SizeType = size_t>
|
||||
template<typename ValueType, typename SizeType = std::size_t>
|
||||
struct ParallelGroupBuilder {
|
||||
public:
|
||||
// parallel group builder of data
|
||||
@@ -44,9 +44,9 @@ struct ParallelGroupBuilder {
|
||||
* \param nkeys number of keys in the matrix, can be smaller than expected
|
||||
* \param nthread number of thread that will be used in construction
|
||||
*/
|
||||
inline void InitBudget(size_t nkeys, int nthread) {
|
||||
inline void InitBudget(std::size_t nkeys, int nthread) {
|
||||
thread_rptr_.resize(nthread);
|
||||
for (size_t i = 0; i < thread_rptr_.size(); ++i) {
|
||||
for (std::size_t i = 0; i < thread_rptr_.size(); ++i) {
|
||||
thread_rptr_[i].resize(nkeys);
|
||||
std::fill(thread_rptr_[i].begin(), thread_rptr_[i].end(), 0);
|
||||
}
|
||||
@@ -57,7 +57,7 @@ struct ParallelGroupBuilder {
|
||||
* \param threadid the id of thread that calls this function
|
||||
* \param nelem number of element budget add to this row
|
||||
*/
|
||||
inline void AddBudget(size_t key, int threadid, SizeType nelem = 1) {
|
||||
inline void AddBudget(std::size_t key, int threadid, SizeType nelem = 1) {
|
||||
std::vector<SizeType> &trptr = thread_rptr_[threadid];
|
||||
if (trptr.size() < key + 1) {
|
||||
trptr.resize(key + 1, 0);
|
||||
@@ -67,23 +67,23 @@ struct ParallelGroupBuilder {
|
||||
/*! \brief step 3: initialize the necessary storage */
|
||||
inline void InitStorage() {
|
||||
// set rptr to correct size
|
||||
for (size_t tid = 0; tid < thread_rptr_.size(); ++tid) {
|
||||
for (std::size_t tid = 0; tid < thread_rptr_.size(); ++tid) {
|
||||
if (rptr_.size() <= thread_rptr_[tid].size()) {
|
||||
rptr_.resize(thread_rptr_[tid].size() + 1);
|
||||
rptr_.resize(thread_rptr_[tid].size() + 1); // key + 1
|
||||
}
|
||||
}
|
||||
// initialize rptr to be beginning of each segment
|
||||
size_t start = 0;
|
||||
for (size_t i = 0; i + 1 < rptr_.size(); ++i) {
|
||||
for (size_t tid = 0; tid < thread_rptr_.size(); ++tid) {
|
||||
std::size_t start = 0;
|
||||
for (std::size_t i = 0; i + 1 < rptr_.size(); ++i) {
|
||||
for (std::size_t tid = 0; tid < thread_rptr_.size(); ++tid) {
|
||||
std::vector<SizeType> &trptr = thread_rptr_[tid];
|
||||
if (i < trptr.size()) {
|
||||
size_t ncnt = trptr[i];
|
||||
if (i < trptr.size()) { // i^th row is assigned for this thread
|
||||
std::size_t ncnt = trptr[i]; // how many entries in this row
|
||||
trptr[i] = start;
|
||||
start += ncnt;
|
||||
}
|
||||
}
|
||||
rptr_[i + 1] = start;
|
||||
rptr_[i + 1] = start; // pointer accumulated from all thread
|
||||
}
|
||||
data_.resize(start);
|
||||
}
|
||||
@@ -95,7 +95,7 @@ struct ParallelGroupBuilder {
|
||||
* \param value The value to be pushed to the group.
|
||||
* \param threadid the id of thread that calls this function
|
||||
*/
|
||||
inline void Push(size_t key, ValueType value, int threadid) {
|
||||
void Push(std::size_t key, ValueType value, int threadid) {
|
||||
SizeType &rp = thread_rptr_[threadid][key];
|
||||
data_[rp++] = value;
|
||||
}
|
||||
|
||||
@@ -1,22 +1,48 @@
|
||||
/*!
|
||||
* Copyright 2017 by Contributors
|
||||
* Copyright 2017-2019 by Contributors
|
||||
* \file hist_util.h
|
||||
* \brief Utilities to store histograms
|
||||
* \author Philip Cho, Tianqi Chen
|
||||
*/
|
||||
#include <rabit/rabit.h>
|
||||
#include <dmlc/omp.h>
|
||||
#include <numeric>
|
||||
#include <vector>
|
||||
#include "./sync.h"
|
||||
|
||||
#include "./random.h"
|
||||
#include "./column_matrix.h"
|
||||
#include "./hist_util.h"
|
||||
#include "./quantile.h"
|
||||
|
||||
#if defined(XGBOOST_MM_PREFETCH_PRESENT)
|
||||
#include <xmmintrin.h>
|
||||
#define PREFETCH_READ_T0(addr) _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0)
|
||||
#elif defined(XGBOOST_BUILTIN_PREFETCH_PRESENT)
|
||||
#define PREFETCH_READ_T0(addr) __builtin_prefetch(reinterpret_cast<const char*>(addr), 0, 3)
|
||||
#else // no SW pre-fetching available; PREFETCH_READ_T0 is no-op
|
||||
#define PREFETCH_READ_T0(addr) do {} while (0)
|
||||
#endif // defined(XGBOOST_MM_PREFETCH_PRESENT)
|
||||
|
||||
namespace xgboost {
|
||||
namespace common {
|
||||
|
||||
HistCutMatrix::HistCutMatrix() {
|
||||
monitor_.Init("HistCutMatrix");
|
||||
}
|
||||
|
||||
size_t HistCutMatrix::SearchGroupIndFromBaseRow(
|
||||
std::vector<bst_uint> const& group_ptr, size_t const base_rowid) const {
|
||||
using KIt = std::vector<bst_uint>::const_iterator;
|
||||
KIt res = std::lower_bound(group_ptr.cbegin(), group_ptr.cend() - 1, base_rowid);
|
||||
// Cannot use CHECK_NE because it will try to print the iterator.
|
||||
bool const found = res != group_ptr.cend() - 1;
|
||||
if (!found) {
|
||||
LOG(FATAL) << "Row " << base_rowid << " does not lie in any group!\n";
|
||||
}
|
||||
size_t group_ind = std::distance(group_ptr.cbegin(), res);
|
||||
return group_ind;
|
||||
}
|
||||
|
||||
void HistCutMatrix::Init(DMatrix* p_fmat, uint32_t max_num_bins) {
|
||||
monitor_.Start("Init");
|
||||
const MetaInfo& info = p_fmat->Info();
|
||||
|
||||
// safe factor for better accuracy
|
||||
@@ -25,30 +51,50 @@ void HistCutMatrix::Init(DMatrix* p_fmat, uint32_t max_num_bins) {
|
||||
|
||||
const int nthread = omp_get_max_threads();
|
||||
|
||||
auto nstep = static_cast<unsigned>((info.num_col_ + nthread - 1) / nthread);
|
||||
auto ncol = static_cast<unsigned>(info.num_col_);
|
||||
unsigned const nstep =
|
||||
static_cast<unsigned>((info.num_col_ + nthread - 1) / nthread);
|
||||
unsigned const ncol = static_cast<unsigned>(info.num_col_);
|
||||
sketchs.resize(info.num_col_);
|
||||
for (auto& s : sketchs) {
|
||||
s.Init(info.num_row_, 1.0 / (max_num_bins * kFactor));
|
||||
}
|
||||
|
||||
const auto& weights = info.weights_.HostVector();
|
||||
|
||||
// Data groups, used in ranking.
|
||||
std::vector<bst_uint> const& group_ptr = info.group_ptr_;
|
||||
size_t const num_groups = group_ptr.size() == 0 ? 0 : group_ptr.size() - 1;
|
||||
// Use group index for weights?
|
||||
bool const use_group_ind = num_groups != 0 && weights.size() != info.num_row_;
|
||||
|
||||
for (const auto &batch : p_fmat->GetRowBatches()) {
|
||||
#pragma omp parallel num_threads(nthread)
|
||||
size_t group_ind = 0;
|
||||
if (use_group_ind) {
|
||||
group_ind = this->SearchGroupIndFromBaseRow(group_ptr, batch.base_rowid);
|
||||
}
|
||||
#pragma omp parallel num_threads(nthread) firstprivate(group_ind, use_group_ind)
|
||||
{
|
||||
CHECK_EQ(nthread, omp_get_num_threads());
|
||||
auto tid = static_cast<unsigned>(omp_get_thread_num());
|
||||
unsigned begin = std::min(nstep * tid, ncol);
|
||||
unsigned end = std::min(nstep * (tid + 1), ncol);
|
||||
|
||||
// do not iterate if no columns are assigned to the thread
|
||||
if (begin < end && end <= ncol) {
|
||||
for (size_t i = 0; i < batch.Size(); ++i) { // NOLINT(*)
|
||||
size_t ridx = batch.base_rowid + i;
|
||||
SparsePage::Inst inst = batch[i];
|
||||
for (auto& ins : inst) {
|
||||
if (ins.index >= begin && ins.index < end) {
|
||||
sketchs[ins.index].Push(ins.fvalue,
|
||||
weights.size() > 0 ? weights[ridx] : 1.0f);
|
||||
size_t const ridx = batch.base_rowid + i;
|
||||
SparsePage::Inst const inst = batch[i];
|
||||
if (use_group_ind &&
|
||||
group_ptr[group_ind] == ridx &&
|
||||
// maximum equals to weights.size() - 1
|
||||
group_ind < num_groups - 1) {
|
||||
// move to next group
|
||||
group_ind++;
|
||||
}
|
||||
for (auto const& entry : inst) {
|
||||
if (entry.index >= begin && entry.index < end) {
|
||||
size_t w_idx = use_group_ind ? group_ind : ridx;
|
||||
sketchs[entry.index].Push(entry.fvalue, info.GetWeight(w_idx));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -57,6 +103,7 @@ void HistCutMatrix::Init(DMatrix* p_fmat, uint32_t max_num_bins) {
|
||||
}
|
||||
|
||||
Init(&sketchs, max_num_bins);
|
||||
monitor_.Stop("Init");
|
||||
}
|
||||
|
||||
void HistCutMatrix::Init
|
||||
@@ -73,9 +120,9 @@ void HistCutMatrix::Init
|
||||
summary_array[i].Reserve(max_num_bins * kFactor);
|
||||
summary_array[i].SetPrune(out, max_num_bins * kFactor);
|
||||
}
|
||||
CHECK_EQ(summary_array.size(), in_sketchs->size());
|
||||
size_t nbytes = WXQSketch::SummaryContainer::CalcMemCost(max_num_bins * kFactor);
|
||||
sreducer.Allreduce(dmlc::BeginPtr(summary_array), nbytes, summary_array.size());
|
||||
|
||||
this->min_val.resize(sketchs.size());
|
||||
row_ptr.push_back(0);
|
||||
for (size_t fid = 0; fid < summary_array.size(); ++fid) {
|
||||
@@ -101,14 +148,17 @@ void HistCutMatrix::Init
|
||||
}
|
||||
}
|
||||
// push a value that is greater than anything
|
||||
if (a.size != 0) {
|
||||
bst_float cpt = a.data[a.size - 1].value;
|
||||
// this must be bigger than last value in a scale
|
||||
bst_float last = cpt + (fabs(cpt) + 1e-5);
|
||||
cut.push_back(last);
|
||||
}
|
||||
const bst_float cpt
|
||||
= (a.size > 0) ? a.data[a.size - 1].value : this->min_val[fid];
|
||||
// this must be bigger than last value in a scale
|
||||
const bst_float last = cpt + (fabs(cpt) + 1e-5);
|
||||
cut.push_back(last);
|
||||
|
||||
row_ptr.push_back(static_cast<bst_uint>(cut.size()));
|
||||
// Ensure that every feature gets at least one quantile point
|
||||
CHECK_LE(cut.size(), std::numeric_limits<uint32_t>::max());
|
||||
auto cut_size = static_cast<uint32_t>(cut.size());
|
||||
CHECK_GT(cut_size, row_ptr.back());
|
||||
row_ptr.push_back(cut_size);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -118,7 +168,9 @@ uint32_t HistCutMatrix::GetBinIdx(const Entry& e) {
|
||||
auto cend = cut.begin() + row_ptr[fid + 1];
|
||||
CHECK(cbegin != cend);
|
||||
auto it = std::upper_bound(cbegin, cend, e.fvalue);
|
||||
if (it == cend) it = cend - 1;
|
||||
if (it == cend) {
|
||||
it = cend - 1;
|
||||
}
|
||||
uint32_t idx = static_cast<uint32_t>(it - cut.begin());
|
||||
return idx;
|
||||
}
|
||||
@@ -151,6 +203,7 @@ void GHistIndexMatrix::Init(DMatrix* p_fmat, int max_num_bins) {
|
||||
SparsePage::Inst inst = batch[i];
|
||||
|
||||
CHECK_EQ(ibegin + inst.size(), iend);
|
||||
|
||||
for (bst_uint j = 0; j < inst.size(); ++j) {
|
||||
uint32_t idx = cut.GetBinIdx(inst[j]);
|
||||
|
||||
@@ -216,7 +269,7 @@ FindGroups(const std::vector<unsigned>& feature_list,
|
||||
const std::vector<size_t>& feature_nnz,
|
||||
const ColumnMatrix& colmat,
|
||||
size_t nrow,
|
||||
const FastHistParam& param) {
|
||||
const tree::TrainParam& param) {
|
||||
/* Goal: Bundle features together that has little or no "overlap", i.e.
|
||||
only a few data points should have nonzero values for
|
||||
member features.
|
||||
@@ -278,7 +331,7 @@ FindGroups(const std::vector<unsigned>& feature_list,
|
||||
inline std::vector<std::vector<unsigned>>
|
||||
FastFeatureGrouping(const GHistIndexMatrix& gmat,
|
||||
const ColumnMatrix& colmat,
|
||||
const FastHistParam& param) {
|
||||
const tree::TrainParam& param) {
|
||||
const size_t nrow = gmat.row_ptr.size() - 1;
|
||||
const size_t nfeature = gmat.cut.row_ptr.size() - 1;
|
||||
|
||||
@@ -332,7 +385,7 @@ FastFeatureGrouping(const GHistIndexMatrix& gmat,
|
||||
|
||||
void GHistIndexBlockMatrix::Init(const GHistIndexMatrix& gmat,
|
||||
const ColumnMatrix& colmat,
|
||||
const FastHistParam& param) {
|
||||
const tree::TrainParam& param) {
|
||||
cut_ = &gmat.cut;
|
||||
|
||||
const size_t nrow = gmat.row_ptr.size() - 1;
|
||||
@@ -398,56 +451,89 @@ void GHistBuilder::BuildHist(const std::vector<GradientPair>& gpair,
|
||||
const RowSetCollection::Elem row_indices,
|
||||
const GHistIndexMatrix& gmat,
|
||||
GHistRow hist) {
|
||||
data_.resize(nbins_ * nthread_, GHistEntry());
|
||||
std::fill(data_.begin(), data_.end(), GHistEntry());
|
||||
const size_t nthread = static_cast<size_t>(this->nthread_);
|
||||
data_.resize(nbins_ * nthread_);
|
||||
|
||||
constexpr int kUnroll = 8; // loop unrolling factor
|
||||
const auto nthread = static_cast<bst_omp_uint>(this->nthread_);
|
||||
const size_t nrows = row_indices.end - row_indices.begin;
|
||||
const size_t rest = nrows % kUnroll;
|
||||
const size_t* rid = row_indices.begin;
|
||||
const size_t nrows = row_indices.Size();
|
||||
const uint32_t* index = gmat.index.data();
|
||||
const size_t* row_ptr = gmat.row_ptr.data();
|
||||
const float* pgh = reinterpret_cast<const float*>(gpair.data());
|
||||
|
||||
#pragma omp parallel for num_threads(nthread) schedule(guided)
|
||||
for (bst_omp_uint i = 0; i < nrows - rest; i += kUnroll) {
|
||||
const bst_omp_uint tid = omp_get_thread_num();
|
||||
const size_t off = tid * nbins_;
|
||||
size_t rid[kUnroll];
|
||||
size_t ibegin[kUnroll];
|
||||
size_t iend[kUnroll];
|
||||
GradientPair stat[kUnroll];
|
||||
for (int k = 0; k < kUnroll; ++k) {
|
||||
rid[k] = row_indices.begin[i + k];
|
||||
double* hist_data = reinterpret_cast<double*>(hist.data());
|
||||
double* data = reinterpret_cast<double*>(data_.data());
|
||||
|
||||
const size_t block_size = 512;
|
||||
size_t n_blocks = nrows/block_size;
|
||||
n_blocks += !!(nrows - n_blocks*block_size);
|
||||
|
||||
const size_t nthread_to_process = std::min(nthread, n_blocks);
|
||||
memset(thread_init_.data(), '\0', nthread_to_process*sizeof(size_t));
|
||||
|
||||
const size_t cache_line_size = 64;
|
||||
const size_t prefetch_offset = 10;
|
||||
size_t no_prefetch_size = prefetch_offset + cache_line_size/sizeof(*rid);
|
||||
no_prefetch_size = no_prefetch_size > nrows ? nrows : no_prefetch_size;
|
||||
|
||||
#pragma omp parallel for num_threads(nthread_to_process) schedule(guided)
|
||||
for (bst_omp_uint iblock = 0; iblock < n_blocks; iblock++) {
|
||||
dmlc::omp_uint tid = omp_get_thread_num();
|
||||
double* data_local_hist = ((nthread_to_process == 1) ? hist_data :
|
||||
reinterpret_cast<double*>(data_.data() + tid * nbins_));
|
||||
|
||||
if (!thread_init_[tid]) {
|
||||
memset(data_local_hist, '\0', 2*nbins_*sizeof(double));
|
||||
thread_init_[tid] = true;
|
||||
}
|
||||
for (int k = 0; k < kUnroll; ++k) {
|
||||
ibegin[k] = gmat.row_ptr[rid[k]];
|
||||
iend[k] = gmat.row_ptr[rid[k] + 1];
|
||||
}
|
||||
for (int k = 0; k < kUnroll; ++k) {
|
||||
stat[k] = gpair[rid[k]];
|
||||
}
|
||||
for (int k = 0; k < kUnroll; ++k) {
|
||||
for (size_t j = ibegin[k]; j < iend[k]; ++j) {
|
||||
const uint32_t bin = gmat.index[j];
|
||||
data_[off + bin].Add(stat[k]);
|
||||
|
||||
const size_t istart = iblock*block_size;
|
||||
const size_t iend = (((iblock+1)*block_size > nrows) ? nrows : istart + block_size);
|
||||
for (size_t i = istart; i < iend; ++i) {
|
||||
const size_t icol_start = row_ptr[rid[i]];
|
||||
const size_t icol_end = row_ptr[rid[i]+1];
|
||||
|
||||
if (i < nrows - no_prefetch_size) {
|
||||
PREFETCH_READ_T0(row_ptr + rid[i + prefetch_offset]);
|
||||
PREFETCH_READ_T0(pgh + 2*rid[i + prefetch_offset]);
|
||||
}
|
||||
|
||||
for (size_t j = icol_start; j < icol_end; ++j) {
|
||||
const uint32_t idx_bin = 2*index[j];
|
||||
const size_t idx_gh = 2*rid[i];
|
||||
|
||||
data_local_hist[idx_bin] += pgh[idx_gh];
|
||||
data_local_hist[idx_bin+1] += pgh[idx_gh+1];
|
||||
}
|
||||
}
|
||||
}
|
||||
for (size_t i = nrows - rest; i < nrows; ++i) {
|
||||
const size_t rid = row_indices.begin[i];
|
||||
const size_t ibegin = gmat.row_ptr[rid];
|
||||
const size_t iend = gmat.row_ptr[rid + 1];
|
||||
const GradientPair stat = gpair[rid];
|
||||
for (size_t j = ibegin; j < iend; ++j) {
|
||||
const uint32_t bin = gmat.index[j];
|
||||
data_[bin].Add(stat);
|
||||
}
|
||||
}
|
||||
|
||||
/* reduction */
|
||||
const uint32_t nbins = nbins_;
|
||||
#pragma omp parallel for num_threads(nthread) schedule(static)
|
||||
for (bst_omp_uint bin_id = 0; bin_id < bst_omp_uint(nbins); ++bin_id) {
|
||||
for (bst_omp_uint tid = 0; tid < nthread; ++tid) {
|
||||
hist.begin[bin_id].Add(data_[tid * nbins_ + bin_id]);
|
||||
if (nthread_to_process > 1) {
|
||||
const size_t size = (2*nbins_);
|
||||
const size_t block_size = 1024;
|
||||
size_t n_blocks = size/block_size;
|
||||
n_blocks += !!(size - n_blocks*block_size);
|
||||
|
||||
size_t n_worked_bins = 0;
|
||||
for (size_t i = 0; i < nthread_to_process; ++i) {
|
||||
if (thread_init_[i]) {
|
||||
thread_init_[n_worked_bins++] = i;
|
||||
}
|
||||
}
|
||||
|
||||
#pragma omp parallel for num_threads(std::min(nthread, n_blocks)) schedule(guided)
|
||||
for (bst_omp_uint iblock = 0; iblock < n_blocks; iblock++) {
|
||||
const size_t istart = iblock * block_size;
|
||||
const size_t iend = (((iblock + 1) * block_size > size) ? size : istart + block_size);
|
||||
|
||||
const size_t bin = 2 * thread_init_[0] * nbins_;
|
||||
memcpy(hist_data + istart, (data + bin + istart), sizeof(double) * (iend - istart));
|
||||
|
||||
for (size_t i_bin_part = 1; i_bin_part < n_worked_bins; ++i_bin_part) {
|
||||
const size_t bin = 2 * thread_init_[i_bin_part] * nbins_;
|
||||
for (size_t i = istart; i < iend; i++) {
|
||||
hist_data[i] += data[bin + i];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -463,9 +549,10 @@ void GHistBuilder::BuildBlockHist(const std::vector<GradientPair>& gpair,
|
||||
|
||||
#if defined(_OPENMP)
|
||||
const auto nthread = static_cast<bst_omp_uint>(this->nthread_);
|
||||
#endif
|
||||
#endif // defined(_OPENMP)
|
||||
tree::GradStats* p_hist = hist.data();
|
||||
|
||||
#pragma omp parallel for num_threads(nthread) schedule(guided)
|
||||
#pragma omp parallel for num_threads(nthread) schedule(guided)
|
||||
for (bst_omp_uint bid = 0; bid < nblock; ++bid) {
|
||||
auto gmat = gmatb[bid];
|
||||
|
||||
@@ -474,20 +561,17 @@ void GHistBuilder::BuildBlockHist(const std::vector<GradientPair>& gpair,
|
||||
size_t ibegin[kUnroll];
|
||||
size_t iend[kUnroll];
|
||||
GradientPair stat[kUnroll];
|
||||
|
||||
for (int k = 0; k < kUnroll; ++k) {
|
||||
rid[k] = row_indices.begin[i + k];
|
||||
}
|
||||
for (int k = 0; k < kUnroll; ++k) {
|
||||
ibegin[k] = gmat.row_ptr[rid[k]];
|
||||
iend[k] = gmat.row_ptr[rid[k] + 1];
|
||||
}
|
||||
for (int k = 0; k < kUnroll; ++k) {
|
||||
stat[k] = gpair[rid[k]];
|
||||
}
|
||||
for (int k = 0; k < kUnroll; ++k) {
|
||||
for (size_t j = ibegin[k]; j < iend[k]; ++j) {
|
||||
const uint32_t bin = gmat.index[j];
|
||||
hist.begin[bin].Add(stat[k]);
|
||||
p_hist[bin].Add(stat[k]);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -498,7 +582,7 @@ void GHistBuilder::BuildBlockHist(const std::vector<GradientPair>& gpair,
|
||||
const GradientPair stat = gpair[rid];
|
||||
for (size_t j = ibegin; j < iend; ++j) {
|
||||
const uint32_t bin = gmat.index[j];
|
||||
hist.begin[bin].Add(stat);
|
||||
p_hist[bin].Add(stat);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -511,25 +595,28 @@ void GHistBuilder::SubtractionTrick(GHistRow self, GHistRow sibling, GHistRow pa
|
||||
|
||||
#if defined(_OPENMP)
|
||||
const auto nthread = static_cast<bst_omp_uint>(this->nthread_);
|
||||
#endif
|
||||
#endif // defined(_OPENMP)
|
||||
tree::GradStats* p_self = self.data();
|
||||
tree::GradStats* p_sibling = sibling.data();
|
||||
tree::GradStats* p_parent = parent.data();
|
||||
|
||||
#pragma omp parallel for num_threads(nthread) schedule(static)
|
||||
#pragma omp parallel for num_threads(nthread) schedule(static)
|
||||
for (bst_omp_uint bin_id = 0;
|
||||
bin_id < static_cast<bst_omp_uint>(nbins - rest); bin_id += kUnroll) {
|
||||
GHistEntry pb[kUnroll];
|
||||
GHistEntry sb[kUnroll];
|
||||
tree::GradStats pb[kUnroll];
|
||||
tree::GradStats sb[kUnroll];
|
||||
for (int k = 0; k < kUnroll; ++k) {
|
||||
pb[k] = parent.begin[bin_id + k];
|
||||
pb[k] = p_parent[bin_id + k];
|
||||
}
|
||||
for (int k = 0; k < kUnroll; ++k) {
|
||||
sb[k] = sibling.begin[bin_id + k];
|
||||
sb[k] = p_sibling[bin_id + k];
|
||||
}
|
||||
for (int k = 0; k < kUnroll; ++k) {
|
||||
self.begin[bin_id + k].SetSubtract(pb[k], sb[k]);
|
||||
p_self[bin_id + k].SetSubstract(pb[k], sb[k]);
|
||||
}
|
||||
}
|
||||
for (uint32_t bin_id = nbins - rest; bin_id < nbins; ++bin_id) {
|
||||
self.begin[bin_id].SetSubtract(parent.begin[bin_id], sibling.begin[bin_id]);
|
||||
p_self[bin_id].SetSubstract(p_parent[bin_id], p_sibling[bin_id]);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -116,19 +116,19 @@ struct GPUSketcher {
|
||||
n_rows_(row_end - row_begin), param_(std::move(param)) {
|
||||
}
|
||||
|
||||
void Init(const SparsePage& row_batch, const MetaInfo& info) {
|
||||
void Init(const SparsePage& row_batch, const MetaInfo& info, int gpu_batch_nrows) {
|
||||
num_cols_ = info.num_col_;
|
||||
has_weights_ = info.weights_.Size() > 0;
|
||||
|
||||
// find the batch size
|
||||
if (param_.gpu_batch_nrows == 0) {
|
||||
if (gpu_batch_nrows == 0) {
|
||||
// By default, use no more than 1/16th of GPU memory
|
||||
gpu_batch_nrows_ = dh::TotalMemory(device_) /
|
||||
(16 * num_cols_ * sizeof(Entry));
|
||||
} else if (param_.gpu_batch_nrows == -1) {
|
||||
} else if (gpu_batch_nrows == -1) {
|
||||
gpu_batch_nrows_ = n_rows_;
|
||||
} else {
|
||||
gpu_batch_nrows_ = param_.gpu_batch_nrows;
|
||||
gpu_batch_nrows_ = gpu_batch_nrows;
|
||||
}
|
||||
if (gpu_batch_nrows_ > n_rows_) {
|
||||
gpu_batch_nrows_ = n_rows_;
|
||||
@@ -346,21 +346,24 @@ struct GPUSketcher {
|
||||
}
|
||||
};
|
||||
|
||||
void Sketch(const SparsePage& batch, const MetaInfo& info, HistCutMatrix* hmat) {
|
||||
void Sketch(const SparsePage& batch, const MetaInfo& info,
|
||||
HistCutMatrix* hmat, int gpu_batch_nrows) {
|
||||
// create device shards
|
||||
shards_.resize(dist_.Devices().Size());
|
||||
dh::ExecuteIndexShards(&shards_, [&](int i, std::unique_ptr<DeviceShard>& shard) {
|
||||
size_t start = dist_.ShardStart(info.num_row_, i);
|
||||
size_t size = dist_.ShardSize(info.num_row_, i);
|
||||
shard = std::unique_ptr<DeviceShard>
|
||||
(new DeviceShard(dist_.Devices()[i], start, start + size, param_));
|
||||
shard = std::unique_ptr<DeviceShard>(
|
||||
new DeviceShard(dist_.Devices().DeviceId(i),
|
||||
start, start + size, param_));
|
||||
});
|
||||
|
||||
// compute sketches for each shard
|
||||
dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
|
||||
shard->Init(batch, info);
|
||||
shard->Sketch(batch, info);
|
||||
});
|
||||
dh::ExecuteIndexShards(&shards_,
|
||||
[&](int idx, std::unique_ptr<DeviceShard>& shard) {
|
||||
shard->Init(batch, info, gpu_batch_nrows);
|
||||
shard->Sketch(batch, info);
|
||||
});
|
||||
|
||||
// merge the sketches from all shards
|
||||
// TODO(canonizer): do it in a tree-like reduction
|
||||
@@ -379,8 +382,7 @@ struct GPUSketcher {
|
||||
}
|
||||
|
||||
GPUSketcher(tree::TrainParam param, size_t n_rows) : param_(std::move(param)) {
|
||||
dist_ = GPUDistribution::Block(GPUSet::All(param_.n_gpus, n_rows).
|
||||
Normalised(param_.gpu_id));
|
||||
dist_ = GPUDistribution::Block(GPUSet::All(param_.gpu_id, param_.n_gpus, n_rows));
|
||||
}
|
||||
|
||||
std::vector<std::unique_ptr<DeviceShard>> shards_;
|
||||
@@ -390,9 +392,9 @@ struct GPUSketcher {
|
||||
|
||||
void DeviceSketch
|
||||
(const SparsePage& batch, const MetaInfo& info,
|
||||
const tree::TrainParam& param, HistCutMatrix* hmat) {
|
||||
const tree::TrainParam& param, HistCutMatrix* hmat, int gpu_batch_nrows) {
|
||||
GPUSketcher sketcher(param, info.num_row_);
|
||||
sketcher.Sketch(batch, info, hmat);
|
||||
sketcher.Sketch(batch, info, hmat, gpu_batch_nrows);
|
||||
}
|
||||
|
||||
} // namespace common
|
||||
|
||||
@@ -11,48 +11,14 @@
|
||||
#include <limits>
|
||||
#include <vector>
|
||||
#include "row_set.h"
|
||||
#include "../tree/fast_hist_param.h"
|
||||
#include "../tree/param.h"
|
||||
#include "./quantile.h"
|
||||
#include "./timer.h"
|
||||
#include "../include/rabit/rabit.h"
|
||||
|
||||
namespace xgboost {
|
||||
|
||||
namespace common {
|
||||
|
||||
using tree::FastHistParam;
|
||||
|
||||
/*! \brief sums of gradient statistics corresponding to a histogram bin */
|
||||
struct GHistEntry {
|
||||
/*! \brief sum of first-order gradient statistics */
|
||||
double sum_grad{0};
|
||||
/*! \brief sum of second-order gradient statistics */
|
||||
double sum_hess{0};
|
||||
|
||||
GHistEntry() = default;
|
||||
|
||||
inline void Clear() {
|
||||
sum_grad = sum_hess = 0;
|
||||
}
|
||||
|
||||
/*! \brief add a GradientPair to the sum */
|
||||
inline void Add(const GradientPair& e) {
|
||||
sum_grad += e.GetGrad();
|
||||
sum_hess += e.GetHess();
|
||||
}
|
||||
|
||||
/*! \brief add a GHistEntry to the sum */
|
||||
inline void Add(const GHistEntry& e) {
|
||||
sum_grad += e.sum_grad;
|
||||
sum_hess += e.sum_hess;
|
||||
}
|
||||
|
||||
/*! \brief set sum to be difference of two GHistEntry's */
|
||||
inline void SetSubtract(const GHistEntry& a, const GHistEntry& b) {
|
||||
sum_grad = a.sum_grad - b.sum_grad;
|
||||
sum_hess = a.sum_hess - b.sum_hess;
|
||||
}
|
||||
};
|
||||
|
||||
/*! \brief Cut configuration for all the features. */
|
||||
struct HistCutMatrix {
|
||||
/*! \brief Unit pointer to rows by element position */
|
||||
@@ -70,26 +36,26 @@ struct HistCutMatrix {
|
||||
void Init(DMatrix* p_fmat, uint32_t max_num_bins);
|
||||
|
||||
void Init(std::vector<WXQSketch>* sketchs, uint32_t max_num_bins);
|
||||
|
||||
HistCutMatrix();
|
||||
|
||||
protected:
|
||||
virtual size_t SearchGroupIndFromBaseRow(
|
||||
std::vector<bst_uint> const& group_ptr, size_t const base_rowid) const;
|
||||
|
||||
Monitor monitor_;
|
||||
};
|
||||
|
||||
/*! \brief Builds the cut matrix on the GPU */
|
||||
void DeviceSketch
|
||||
(const SparsePage& batch, const MetaInfo& info,
|
||||
const tree::TrainParam& param, HistCutMatrix* hmat);
|
||||
const tree::TrainParam& param, HistCutMatrix* hmat, int gpu_batch_nrows);
|
||||
|
||||
/*!
|
||||
* \brief A single row in global histogram index.
|
||||
* Directly represent the global index in the histogram entry.
|
||||
*/
|
||||
struct GHistIndexRow {
|
||||
/*! \brief The index of the histogram */
|
||||
const uint32_t* index;
|
||||
/*! \brief The size of the histogram */
|
||||
size_t size;
|
||||
GHistIndexRow() = default;
|
||||
GHistIndexRow(const uint32_t* index, size_t size)
|
||||
: index(index), size(size) {}
|
||||
};
|
||||
using GHistIndexRow = Span<uint32_t const>;
|
||||
|
||||
/*!
|
||||
* \brief preprocessed global index matrix, in CSR format
|
||||
@@ -109,7 +75,9 @@ struct GHistIndexMatrix {
|
||||
void Init(DMatrix* p_fmat, int max_num_bins);
|
||||
// get i-th row
|
||||
inline GHistIndexRow operator[](size_t i) const {
|
||||
return {&index[0] + row_ptr[i], row_ptr[i + 1] - row_ptr[i]};
|
||||
return {&index[0] + row_ptr[i],
|
||||
static_cast<GHistIndexRow::index_type>(
|
||||
row_ptr[i + 1] - row_ptr[i])};
|
||||
}
|
||||
inline void GetFeatureCounts(size_t* counts) const {
|
||||
auto nfeature = cut.row_ptr.size() - 1;
|
||||
@@ -132,11 +100,6 @@ struct GHistIndexBlock {
|
||||
|
||||
inline GHistIndexBlock(const size_t* row_ptr, const uint32_t* index)
|
||||
: row_ptr(row_ptr), index(index) {}
|
||||
|
||||
// get i-th row
|
||||
inline GHistIndexRow operator[](size_t i) const {
|
||||
return {&index[0] + row_ptr[i], row_ptr[i + 1] - row_ptr[i]};
|
||||
}
|
||||
};
|
||||
|
||||
class ColumnMatrix;
|
||||
@@ -145,7 +108,7 @@ class GHistIndexBlockMatrix {
|
||||
public:
|
||||
void Init(const GHistIndexMatrix& gmat,
|
||||
const ColumnMatrix& colmat,
|
||||
const FastHistParam& param);
|
||||
const tree::TrainParam& param);
|
||||
|
||||
inline GHistIndexBlock operator[](size_t i) const {
|
||||
return {blocks_[i].row_ptr_begin, blocks_[i].index_begin};
|
||||
@@ -170,20 +133,11 @@ class GHistIndexBlockMatrix {
|
||||
|
||||
/*!
|
||||
* \brief histogram of graident statistics for a single node.
|
||||
* Consists of multiple GHistEntry's, each entry showing total graident statistics
|
||||
* Consists of multiple GradStats, each entry showing total graident statistics
|
||||
* for that particular bin
|
||||
* Uses global bin id so as to represent all features simultaneously
|
||||
*/
|
||||
struct GHistRow {
|
||||
/*! \brief base pointer to first entry */
|
||||
GHistEntry* begin;
|
||||
/*! \brief number of entries */
|
||||
uint32_t size;
|
||||
|
||||
GHistRow() = default;
|
||||
GHistRow(GHistEntry* begin, uint32_t size)
|
||||
: begin(begin), size(size) {}
|
||||
};
|
||||
using GHistRow = Span<tree::GradStats>;
|
||||
|
||||
/*!
|
||||
* \brief histogram of gradient statistics for multiple nodes
|
||||
@@ -191,27 +145,29 @@ struct GHistRow {
|
||||
class HistCollection {
|
||||
public:
|
||||
// access histogram for i-th node
|
||||
inline GHistRow operator[](bst_uint nid) const {
|
||||
GHistRow operator[](bst_uint nid) const {
|
||||
constexpr uint32_t kMax = std::numeric_limits<uint32_t>::max();
|
||||
CHECK_NE(row_ptr_[nid], kMax);
|
||||
return {const_cast<GHistEntry*>(dmlc::BeginPtr(data_) + row_ptr_[nid]), nbins_};
|
||||
tree::GradStats* ptr =
|
||||
const_cast<tree::GradStats*>(dmlc::BeginPtr(data_) + row_ptr_[nid]);
|
||||
return {ptr, nbins_};
|
||||
}
|
||||
|
||||
// have we computed a histogram for i-th node?
|
||||
inline bool RowExists(bst_uint nid) const {
|
||||
bool RowExists(bst_uint nid) const {
|
||||
const uint32_t k_max = std::numeric_limits<uint32_t>::max();
|
||||
return (nid < row_ptr_.size() && row_ptr_[nid] != k_max);
|
||||
}
|
||||
|
||||
// initialize histogram collection
|
||||
inline void Init(uint32_t nbins) {
|
||||
void Init(uint32_t nbins) {
|
||||
nbins_ = nbins;
|
||||
row_ptr_.clear();
|
||||
data_.clear();
|
||||
}
|
||||
|
||||
// create an empty histogram for i-th node
|
||||
inline void AddHistRow(bst_uint nid) {
|
||||
void AddHistRow(bst_uint nid) {
|
||||
constexpr uint32_t kMax = std::numeric_limits<uint32_t>::max();
|
||||
if (nid >= row_ptr_.size()) {
|
||||
row_ptr_.resize(nid + 1, kMax);
|
||||
@@ -226,7 +182,7 @@ class HistCollection {
|
||||
/*! \brief number of all bins over all features */
|
||||
uint32_t nbins_;
|
||||
|
||||
std::vector<GHistEntry> data_;
|
||||
std::vector<tree::GradStats> data_;
|
||||
|
||||
/*! \brief row_ptr_[nid] locates bin for historgram of node nid */
|
||||
std::vector<size_t> row_ptr_;
|
||||
@@ -241,6 +197,7 @@ class GHistBuilder {
|
||||
inline void Init(size_t nthread, uint32_t nbins) {
|
||||
nthread_ = nthread;
|
||||
nbins_ = nbins;
|
||||
thread_init_.resize(nthread_);
|
||||
}
|
||||
|
||||
// construct a histogram via histogram aggregation
|
||||
@@ -256,12 +213,17 @@ class GHistBuilder {
|
||||
// construct a histogram via subtraction trick
|
||||
void SubtractionTrick(GHistRow self, GHistRow sibling, GHistRow parent);
|
||||
|
||||
uint32_t GetNumBins() {
|
||||
return nbins_;
|
||||
}
|
||||
|
||||
private:
|
||||
/*! \brief number of threads for parallel computation */
|
||||
size_t nthread_;
|
||||
/*! \brief number of all bins over all features */
|
||||
uint32_t nbins_;
|
||||
std::vector<GHistEntry> data_;
|
||||
std::vector<size_t> thread_init_;
|
||||
std::vector<tree::GradStats> data_;
|
||||
};
|
||||
|
||||
|
||||
|
||||
@@ -159,4 +159,4 @@ template class HostDeviceVector<size_t>;
|
||||
|
||||
} // namespace xgboost
|
||||
|
||||
#endif
|
||||
#endif // XGBOOST_USE_CUDA
|
||||
|
||||
@@ -46,14 +46,13 @@ template <typename T>
|
||||
struct HostDeviceVectorImpl {
|
||||
struct DeviceShard {
|
||||
DeviceShard()
|
||||
: index_(-1), proper_size_(0), device_(-1), start_(0), perm_d_(false),
|
||||
: proper_size_(0), device_(-1), start_(0), perm_d_(false),
|
||||
cached_size_(~0), vec_(nullptr) {}
|
||||
|
||||
void Init(HostDeviceVectorImpl<T>* vec, int device) {
|
||||
if (vec_ == nullptr) { vec_ = vec; }
|
||||
CHECK_EQ(vec, vec_);
|
||||
device_ = device;
|
||||
index_ = vec_->distribution_.devices_.Index(device);
|
||||
LazyResize(vec_->Size());
|
||||
perm_d_ = vec_->perm_h_.Complementary();
|
||||
}
|
||||
@@ -62,7 +61,6 @@ struct HostDeviceVectorImpl {
|
||||
if (vec_ == nullptr) { vec_ = vec; }
|
||||
CHECK_EQ(vec, vec_);
|
||||
device_ = other.device_;
|
||||
index_ = other.index_;
|
||||
cached_size_ = other.cached_size_;
|
||||
start_ = other.start_;
|
||||
proper_size_ = other.proper_size_;
|
||||
@@ -114,10 +112,11 @@ struct HostDeviceVectorImpl {
|
||||
if (new_size == cached_size_) { return; }
|
||||
// resize is required
|
||||
int ndevices = vec_->distribution_.devices_.Size();
|
||||
start_ = vec_->distribution_.ShardStart(new_size, index_);
|
||||
proper_size_ = vec_->distribution_.ShardProperSize(new_size, index_);
|
||||
int device_index = vec_->distribution_.devices_.Index(device_);
|
||||
start_ = vec_->distribution_.ShardStart(new_size, device_index);
|
||||
proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index);
|
||||
// The size on this device.
|
||||
size_t size_d = vec_->distribution_.ShardSize(new_size, index_);
|
||||
size_t size_d = vec_->distribution_.ShardSize(new_size, device_index);
|
||||
SetDevice();
|
||||
data_.resize(size_d);
|
||||
cached_size_ = new_size;
|
||||
@@ -154,7 +153,6 @@ struct HostDeviceVectorImpl {
|
||||
}
|
||||
}
|
||||
|
||||
int index_;
|
||||
int device_;
|
||||
thrust::device_vector<T> data_;
|
||||
// cached vector size
|
||||
@@ -183,13 +181,13 @@ struct HostDeviceVectorImpl {
|
||||
distribution_(other.distribution_), mutex_() {
|
||||
shards_.resize(other.shards_.size());
|
||||
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
|
||||
shard.Init(this, other.shards_[i]);
|
||||
shard.Init(this, other.shards_.at(i));
|
||||
});
|
||||
}
|
||||
|
||||
// Init can be std::vector<T> or std::initializer_list<T>
|
||||
template <class Init>
|
||||
HostDeviceVectorImpl(const Init& init, GPUDistribution distribution)
|
||||
// Initializer can be std::vector<T> or std::initializer_list<T>
|
||||
template <class Initializer>
|
||||
HostDeviceVectorImpl(const Initializer& init, GPUDistribution distribution)
|
||||
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
|
||||
if (!distribution_.IsEmpty()) {
|
||||
size_d_ = init.size();
|
||||
@@ -204,7 +202,7 @@ struct HostDeviceVectorImpl {
|
||||
int ndevices = distribution_.devices_.Size();
|
||||
shards_.resize(ndevices);
|
||||
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
|
||||
shard.Init(this, distribution_.devices_[i]);
|
||||
shard.Init(this, distribution_.devices_.DeviceId(i));
|
||||
});
|
||||
}
|
||||
|
||||
@@ -217,20 +215,20 @@ struct HostDeviceVectorImpl {
|
||||
T* DevicePointer(int device) {
|
||||
CHECK(distribution_.devices_.Contains(device));
|
||||
LazySyncDevice(device, GPUAccess::kWrite);
|
||||
return shards_[distribution_.devices_.Index(device)].data_.data().get();
|
||||
return shards_.at(distribution_.devices_.Index(device)).data_.data().get();
|
||||
}
|
||||
|
||||
const T* ConstDevicePointer(int device) {
|
||||
CHECK(distribution_.devices_.Contains(device));
|
||||
LazySyncDevice(device, GPUAccess::kRead);
|
||||
return shards_[distribution_.devices_.Index(device)].data_.data().get();
|
||||
return shards_.at(distribution_.devices_.Index(device)).data_.data().get();
|
||||
}
|
||||
|
||||
common::Span<T> DeviceSpan(int device) {
|
||||
GPUSet devices = distribution_.devices_;
|
||||
CHECK(devices.Contains(device));
|
||||
LazySyncDevice(device, GPUAccess::kWrite);
|
||||
return {shards_[devices.Index(device)].data_.data().get(),
|
||||
return {shards_.at(devices.Index(device)).data_.data().get(),
|
||||
static_cast<typename common::Span<T>::index_type>(DeviceSize(device))};
|
||||
}
|
||||
|
||||
@@ -238,20 +236,20 @@ struct HostDeviceVectorImpl {
|
||||
GPUSet devices = distribution_.devices_;
|
||||
CHECK(devices.Contains(device));
|
||||
LazySyncDevice(device, GPUAccess::kRead);
|
||||
return {shards_[devices.Index(device)].data_.data().get(),
|
||||
return {shards_.at(devices.Index(device)).data_.data().get(),
|
||||
static_cast<typename common::Span<const T>::index_type>(DeviceSize(device))};
|
||||
}
|
||||
|
||||
size_t DeviceSize(int device) {
|
||||
CHECK(distribution_.devices_.Contains(device));
|
||||
LazySyncDevice(device, GPUAccess::kRead);
|
||||
return shards_[distribution_.devices_.Index(device)].data_.size();
|
||||
return shards_.at(distribution_.devices_.Index(device)).data_.size();
|
||||
}
|
||||
|
||||
size_t DeviceStart(int device) {
|
||||
CHECK(distribution_.devices_.Contains(device));
|
||||
LazySyncDevice(device, GPUAccess::kRead);
|
||||
return shards_[distribution_.devices_.Index(device)].start_;
|
||||
return shards_.at(distribution_.devices_.Index(device)).start_;
|
||||
}
|
||||
|
||||
thrust::device_ptr<T> tbegin(int device) { // NOLINT
|
||||
@@ -277,7 +275,7 @@ struct HostDeviceVectorImpl {
|
||||
(end - begin) * sizeof(T),
|
||||
cudaMemcpyDeviceToHost));
|
||||
} else {
|
||||
dh::ExecuteShards(&shards_, [&](DeviceShard& shard) {
|
||||
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
|
||||
shard.ScatterFrom(begin.get());
|
||||
});
|
||||
}
|
||||
@@ -290,7 +288,7 @@ struct HostDeviceVectorImpl {
|
||||
data_h_.size() * sizeof(T),
|
||||
cudaMemcpyHostToDevice));
|
||||
} else {
|
||||
dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.GatherTo(begin); });
|
||||
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); });
|
||||
}
|
||||
}
|
||||
|
||||
@@ -298,7 +296,7 @@ struct HostDeviceVectorImpl {
|
||||
if (perm_h_.CanWrite()) {
|
||||
std::fill(data_h_.begin(), data_h_.end(), v);
|
||||
} else {
|
||||
dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.Fill(v); });
|
||||
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); });
|
||||
}
|
||||
}
|
||||
|
||||
@@ -316,7 +314,7 @@ struct HostDeviceVectorImpl {
|
||||
size_d_ = other->size_d_;
|
||||
}
|
||||
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
|
||||
shard.Copy(&other->shards_[i]);
|
||||
shard.Copy(&other->shards_.at(i));
|
||||
});
|
||||
}
|
||||
|
||||
@@ -325,7 +323,7 @@ struct HostDeviceVectorImpl {
|
||||
if (perm_h_.CanWrite()) {
|
||||
std::copy(other.begin(), other.end(), data_h_.begin());
|
||||
} else {
|
||||
dh::ExecuteShards(&shards_, [&](DeviceShard& shard) {
|
||||
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
|
||||
shard.ScatterFrom(other.data());
|
||||
});
|
||||
}
|
||||
@@ -336,7 +334,7 @@ struct HostDeviceVectorImpl {
|
||||
if (perm_h_.CanWrite()) {
|
||||
std::copy(other.begin(), other.end(), data_h_.begin());
|
||||
} else {
|
||||
dh::ExecuteShards(&shards_, [&](DeviceShard& shard) {
|
||||
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
|
||||
shard.ScatterFrom(other.begin());
|
||||
});
|
||||
}
|
||||
@@ -389,14 +387,14 @@ struct HostDeviceVectorImpl {
|
||||
if (perm_h_.CanAccess(access)) { return; }
|
||||
if (perm_h_.CanRead()) {
|
||||
// data is present, just need to deny access to the device
|
||||
dh::ExecuteShards(&shards_, [&](DeviceShard& shard) {
|
||||
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
|
||||
shard.perm_d_.DenyComplementary(access);
|
||||
});
|
||||
perm_h_.Grant(access);
|
||||
return;
|
||||
}
|
||||
if (data_h_.size() != size_d_) { data_h_.resize(size_d_); }
|
||||
dh::ExecuteShards(&shards_, [&](DeviceShard& shard) {
|
||||
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
|
||||
shard.LazySyncHost(access);
|
||||
});
|
||||
perm_h_.Grant(access);
|
||||
@@ -405,7 +403,7 @@ struct HostDeviceVectorImpl {
|
||||
void LazySyncDevice(int device, GPUAccess access) {
|
||||
GPUSet devices = distribution_.Devices();
|
||||
CHECK(devices.Contains(device));
|
||||
shards_[devices.Index(device)].LazySyncDevice(access);
|
||||
shards_.at(devices.Index(device)).LazySyncDevice(access);
|
||||
}
|
||||
|
||||
bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); }
|
||||
@@ -413,7 +411,7 @@ struct HostDeviceVectorImpl {
|
||||
bool DeviceCanAccess(int device, GPUAccess access) {
|
||||
GPUSet devices = distribution_.Devices();
|
||||
if (!devices.Contains(device)) { return false; }
|
||||
return shards_[devices.Index(device)].perm_d_.CanAccess(access);
|
||||
return shards_.at(devices.Index(device)).perm_d_.CanAccess(access);
|
||||
}
|
||||
|
||||
std::vector<T> data_h_;
|
||||
@@ -461,9 +459,8 @@ HostDeviceVector<T>& HostDeviceVector<T>::operator=
|
||||
|
||||
template <typename T>
|
||||
HostDeviceVector<T>::~HostDeviceVector() {
|
||||
HostDeviceVectorImpl<T>* tmp = impl_;
|
||||
delete impl_;
|
||||
impl_ = nullptr;
|
||||
delete tmp;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
|
||||
@@ -57,6 +57,7 @@
|
||||
#include <algorithm>
|
||||
#include <cstdlib>
|
||||
#include <initializer_list>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "common.h"
|
||||
@@ -66,7 +67,7 @@
|
||||
// is included from a .cu file
|
||||
#ifdef __CUDACC__
|
||||
#include <thrust/device_ptr.h>
|
||||
#endif
|
||||
#endif // __CUDACC__
|
||||
|
||||
namespace xgboost {
|
||||
|
||||
@@ -74,14 +75,15 @@ namespace xgboost {
|
||||
// Sets a function to call instead of cudaSetDevice();
|
||||
// only added for testing
|
||||
void SetCudaSetDeviceHandler(void (*handler)(int));
|
||||
#endif
|
||||
#endif // __CUDACC__
|
||||
|
||||
template <typename T> struct HostDeviceVectorImpl;
|
||||
|
||||
// Distribution for the HostDeviceVector; it specifies such aspects as the devices it is
|
||||
// distributed on, whether there are copies of elements from other GPUs as well as the granularity
|
||||
// of splitting. It may also specify explicit boundaries for devices, in which case the size of the
|
||||
// array cannot be changed.
|
||||
// Distribution for the HostDeviceVector; it specifies such aspects as the
|
||||
// devices it is distributed on, whether there are copies of elements from
|
||||
// other GPUs as well as the granularity of splitting. It may also specify
|
||||
// explicit boundaries for devices, in which case the size of the array cannot
|
||||
// be changed.
|
||||
class GPUDistribution {
|
||||
template<typename T> friend struct HostDeviceVectorImpl;
|
||||
|
||||
@@ -139,7 +141,7 @@ class GPUDistribution {
|
||||
return begin;
|
||||
}
|
||||
|
||||
size_t ShardSize(size_t size, int index) const {
|
||||
size_t ShardSize(size_t size, size_t index) const {
|
||||
if (size == 0) { return 0; }
|
||||
if (offsets_.size() > 0) {
|
||||
// explicit offsets are provided
|
||||
@@ -153,7 +155,7 @@ class GPUDistribution {
|
||||
return end - begin;
|
||||
}
|
||||
|
||||
size_t ShardProperSize(size_t size, int index) const {
|
||||
size_t ShardProperSize(size_t size, size_t index) const {
|
||||
if (size == 0) { return 0; }
|
||||
return ShardSize(size, index) - (devices_.Size() - 1 > index ? overlap_ : 0);
|
||||
}
|
||||
@@ -232,7 +234,7 @@ class HostDeviceVector {
|
||||
|
||||
void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end);
|
||||
void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const;
|
||||
#endif
|
||||
#endif // __CUDACC__
|
||||
|
||||
void Fill(T v);
|
||||
void Copy(const HostDeviceVector<T>& other);
|
||||
|
||||
@@ -9,9 +9,9 @@
|
||||
#define XGBOOST_COMMON_IO_H_
|
||||
|
||||
#include <dmlc/io.h>
|
||||
#include <rabit/rabit.h>
|
||||
#include <string>
|
||||
#include <cstring>
|
||||
#include "./sync.h"
|
||||
|
||||
namespace xgboost {
|
||||
namespace common {
|
||||
|
||||
@@ -116,7 +116,6 @@ inline static bool CmpSecond(const std::pair<float, unsigned> &a,
|
||||
#if XGBOOST_STRICT_R_MODE
|
||||
// check nan
|
||||
bool CheckNAN(double v);
|
||||
double LogGamma(double v);
|
||||
#else
|
||||
template<typename T>
|
||||
inline bool CheckNAN(T v) {
|
||||
@@ -124,11 +123,21 @@ inline bool CheckNAN(T v) {
|
||||
return (_isnan(v) != 0);
|
||||
#else
|
||||
return std::isnan(v);
|
||||
#endif
|
||||
#endif // _MSC_VER
|
||||
}
|
||||
#endif // XGBOOST_STRICT_R_MODE_
|
||||
|
||||
// GPU version is not uploaded in CRAN anyway.
|
||||
// Specialize only when using R with CPU.
|
||||
#if XGBOOST_STRICT_R_MODE && !defined(XGBOOST_USE_CUDA)
|
||||
double LogGamma(double v);
|
||||
|
||||
#else // Not R or R with GPU.
|
||||
|
||||
template<typename T>
|
||||
inline T LogGamma(T v) {
|
||||
XGBOOST_DEVICE inline T LogGamma(T v) {
|
||||
#ifdef _MSC_VER
|
||||
|
||||
#if _MSC_VER >= 1800
|
||||
return lgamma(v);
|
||||
#else
|
||||
@@ -136,12 +145,15 @@ inline T LogGamma(T v) {
|
||||
", poisson regression will be disabled")
|
||||
utils::Error("lgamma function was not available until VS2013");
|
||||
return static_cast<T>(1.0);
|
||||
#endif
|
||||
#endif // _MSC_VER >= 1800
|
||||
|
||||
#else
|
||||
return lgamma(v);
|
||||
#endif
|
||||
#endif // _MSC_VER
|
||||
}
|
||||
#endif // XGBOOST_STRICT_R_MODE_
|
||||
|
||||
#endif // XGBOOST_STRICT_R_MODE && !defined(XGBOOST_USE_CUDA)
|
||||
|
||||
} // namespace common
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_COMMON_MATH_H_
|
||||
|
||||
@@ -7,14 +7,17 @@
|
||||
#ifndef XGBOOST_COMMON_RANDOM_H_
|
||||
#define XGBOOST_COMMON_RANDOM_H_
|
||||
|
||||
#include <rabit/rabit.h>
|
||||
#include <xgboost/logging.h>
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
#include <limits>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <numeric>
|
||||
#include <random>
|
||||
#include "host_device_vector.h"
|
||||
|
||||
#include "io.h"
|
||||
|
||||
namespace xgboost {
|
||||
namespace common {
|
||||
@@ -63,7 +66,7 @@ typedef CustomGlobalRandomEngine GlobalRandomEngine;
|
||||
* \brief global random engine
|
||||
*/
|
||||
using GlobalRandomEngine = RandomEngine;
|
||||
#endif
|
||||
#endif // XGBOOST_CUSTOMIZE_GLOBAL_PRNG
|
||||
|
||||
/*!
|
||||
* \brief global singleton of a random engine.
|
||||
@@ -75,27 +78,36 @@ GlobalRandomEngine& GlobalRandom(); // NOLINT(*)
|
||||
/**
|
||||
* \class ColumnSampler
|
||||
*
|
||||
* \brief Handles selection of columns due to colsample_bytree and
|
||||
* colsample_bylevel parameters. Should be initialised before tree
|
||||
* construction and to reset when tree construction is completed.
|
||||
* \brief Handles selection of columns due to colsample_bytree, colsample_bylevel and
|
||||
* colsample_bynode parameters. Should be initialised before tree construction and to
|
||||
* reset when tree construction is completed.
|
||||
*/
|
||||
|
||||
class ColumnSampler {
|
||||
HostDeviceVector<int> feature_set_tree_;
|
||||
std::map<int, HostDeviceVector<int>> feature_set_level_;
|
||||
std::shared_ptr<std::vector<int>> feature_set_tree_;
|
||||
std::map<int, std::shared_ptr<std::vector<int>>> feature_set_level_;
|
||||
float colsample_bylevel_{1.0f};
|
||||
float colsample_bytree_{1.0f};
|
||||
float colsample_bynode_{1.0f};
|
||||
|
||||
std::vector<int> ColSample(std::vector<int> features, float colsample) const {
|
||||
if (colsample == 1.0f) return features;
|
||||
std::shared_ptr<std::vector<int>> ColSample
|
||||
(std::shared_ptr<std::vector<int>> p_features, float colsample) const {
|
||||
if (colsample == 1.0f) return p_features;
|
||||
const auto& features = *p_features;
|
||||
CHECK_GT(features.size(), 0);
|
||||
int n = std::max(1, static_cast<int>(colsample * features.size()));
|
||||
auto p_new_features = std::make_shared<std::vector<int>>();
|
||||
auto& new_features = *p_new_features;
|
||||
new_features.resize(features.size());
|
||||
std::copy(features.begin(), features.end(), new_features.begin());
|
||||
std::shuffle(new_features.begin(), new_features.end(), common::GlobalRandom());
|
||||
new_features.resize(n);
|
||||
std::sort(new_features.begin(), new_features.end());
|
||||
|
||||
std::shuffle(features.begin(), features.end(), common::GlobalRandom());
|
||||
features.resize(n);
|
||||
std::sort(features.begin(), features.end());
|
||||
// ensure that new_features are the same across ranks
|
||||
rabit::Broadcast(&new_features, 0);
|
||||
|
||||
return features;
|
||||
return p_new_features;
|
||||
}
|
||||
|
||||
public:
|
||||
@@ -103,44 +115,60 @@ class ColumnSampler {
|
||||
* \brief Initialise this object before use.
|
||||
*
|
||||
* \param num_col
|
||||
* \param colsample_bynode
|
||||
* \param colsample_bylevel
|
||||
* \param colsample_bytree
|
||||
* \param skip_index_0 (Optional) True to skip index 0.
|
||||
*/
|
||||
void Init(int64_t num_col, float colsample_bylevel, float colsample_bytree,
|
||||
bool skip_index_0 = false) {
|
||||
this->colsample_bylevel_ = colsample_bylevel;
|
||||
this->colsample_bytree_ = colsample_bytree;
|
||||
this->Reset();
|
||||
void Init(int64_t num_col, float colsample_bynode, float colsample_bylevel,
|
||||
float colsample_bytree, bool skip_index_0 = false) {
|
||||
colsample_bylevel_ = colsample_bylevel;
|
||||
colsample_bytree_ = colsample_bytree;
|
||||
colsample_bynode_ = colsample_bynode;
|
||||
|
||||
if (feature_set_tree_ == nullptr) {
|
||||
feature_set_tree_ = std::make_shared<std::vector<int>>();
|
||||
}
|
||||
Reset();
|
||||
|
||||
int begin_idx = skip_index_0 ? 1 : 0;
|
||||
auto& feature_set_h = feature_set_tree_.HostVector();
|
||||
feature_set_h.resize(num_col - begin_idx);
|
||||
feature_set_tree_->resize(num_col - begin_idx);
|
||||
std::iota(feature_set_tree_->begin(), feature_set_tree_->end(), begin_idx);
|
||||
|
||||
std::iota(feature_set_h.begin(), feature_set_h.end(), begin_idx);
|
||||
feature_set_h = ColSample(feature_set_h, this->colsample_bytree_);
|
||||
feature_set_tree_ = ColSample(feature_set_tree_, colsample_bytree_);
|
||||
}
|
||||
|
||||
/**
|
||||
* \brief Resets this object.
|
||||
*/
|
||||
void Reset() {
|
||||
feature_set_tree_.HostVector().clear();
|
||||
feature_set_tree_->clear();
|
||||
feature_set_level_.clear();
|
||||
}
|
||||
|
||||
HostDeviceVector<int>& GetFeatureSet(int depth) {
|
||||
if (this->colsample_bylevel_ == 1.0f) {
|
||||
/**
|
||||
* \brief Samples a feature set.
|
||||
*
|
||||
* \param depth The tree depth of the node at which to sample.
|
||||
* \return The sampled feature set.
|
||||
* \note If colsample_bynode_ < 1.0, this method creates a new feature set each time it
|
||||
* is called. Therefore, it should be called only once per node.
|
||||
*/
|
||||
std::shared_ptr<std::vector<int>> GetFeatureSet(int depth) {
|
||||
if (colsample_bylevel_ == 1.0f && colsample_bynode_ == 1.0f) {
|
||||
return feature_set_tree_;
|
||||
}
|
||||
|
||||
if (feature_set_level_.count(depth) == 0) {
|
||||
// Level sampling, level does not yet exist so generate it
|
||||
auto& level = feature_set_level_[depth].HostVector();
|
||||
level = ColSample(feature_set_tree_.HostVector(), this->colsample_bylevel_);
|
||||
feature_set_level_[depth] = ColSample(feature_set_tree_, colsample_bylevel_);
|
||||
}
|
||||
// Level sampling
|
||||
return feature_set_level_[depth];
|
||||
if (colsample_bynode_ == 1.0f) {
|
||||
// Level sampling
|
||||
return feature_set_level_[depth];
|
||||
}
|
||||
// Need to sample for the node individually
|
||||
return ColSample(feature_set_level_[depth], colsample_bynode_);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -62,13 +62,14 @@
|
||||
|
||||
#define __span_noexcept noexcept
|
||||
|
||||
#endif
|
||||
#endif // defined(_MSC_VER) && _MSC_VER < 1910
|
||||
|
||||
namespace xgboost {
|
||||
namespace common {
|
||||
|
||||
// Usual logging facility is not available inside device code.
|
||||
// TODO(trivialfis): Make dmlc check more generic.
|
||||
// assert is not supported in mac as of CUDA 10.0
|
||||
#define KERNEL_CHECK(cond) \
|
||||
do { \
|
||||
if (!(cond)) { \
|
||||
@@ -84,7 +85,7 @@ namespace common {
|
||||
#define SPAN_CHECK KERNEL_CHECK
|
||||
#else
|
||||
#define SPAN_CHECK CHECK // check from dmlc
|
||||
#endif
|
||||
#endif // __CUDA_ARCH__
|
||||
|
||||
namespace detail {
|
||||
/*!
|
||||
@@ -100,7 +101,7 @@ using ptrdiff_t = int64_t; // NOLINT
|
||||
constexpr const detail::ptrdiff_t dynamic_extent = -1; // NOLINT
|
||||
#else
|
||||
constexpr detail::ptrdiff_t dynamic_extent = -1; // NOLINT
|
||||
#endif
|
||||
#endif // defined(_MSC_VER) && _MSC_VER < 1910
|
||||
|
||||
enum class byte : unsigned char {}; // NOLINT
|
||||
|
||||
@@ -543,7 +544,7 @@ class Span {
|
||||
XGBOOST_DEVICE auto subspan() const -> // NOLINT
|
||||
Span<element_type,
|
||||
detail::ExtentValue<Extent, Offset, Count>::value> {
|
||||
SPAN_CHECK(Offset >= 0 && Offset < size());
|
||||
SPAN_CHECK(Offset >= 0 && (Offset < size() || size() == 0));
|
||||
SPAN_CHECK(Count == dynamic_extent ||
|
||||
Count >= 0 && Offset + Count <= size());
|
||||
|
||||
@@ -553,9 +554,9 @@ class Span {
|
||||
XGBOOST_DEVICE Span<element_type, dynamic_extent> subspan( // NOLINT
|
||||
detail::ptrdiff_t _offset,
|
||||
detail::ptrdiff_t _count = dynamic_extent) const {
|
||||
SPAN_CHECK(_offset >= 0 && _offset < size());
|
||||
SPAN_CHECK(_count == dynamic_extent ||
|
||||
_count >= 0 && _offset + _count <= size());
|
||||
SPAN_CHECK(_offset >= 0 && (_offset < size() || size() == 0));
|
||||
SPAN_CHECK((_count == dynamic_extent) ||
|
||||
(_count >= 0 && _offset + _count <= size()));
|
||||
|
||||
return {data() + _offset, _count ==
|
||||
dynamic_extent ? size() - _offset : _count};
|
||||
@@ -621,8 +622,8 @@ XGBOOST_DEVICE auto as_writable_bytes(Span<T, E> s) __span_noexcept -> // NOLIN
|
||||
return {reinterpret_cast<byte*>(s.data()), s.size_bytes()};
|
||||
}
|
||||
|
||||
} // namespace common
|
||||
} // namespace xgboost
|
||||
} // namespace common NOLINT
|
||||
} // namespace xgboost NOLINT
|
||||
|
||||
#if defined(_MSC_VER) &&_MSC_VER < 1910
|
||||
#undef constexpr
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user