Compare commits
125 Commits
release_0.
...
release_0.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4b39590c14 | ||
|
|
9a4d0b078f | ||
|
|
78ec77fa97 | ||
|
|
c22e90d5d2 | ||
|
|
6da462234e | ||
|
|
a650131fc3 | ||
|
|
91537e7353 | ||
|
|
e04ab56b57 | ||
|
|
ad68865d6b | ||
|
|
583c88bce7 | ||
|
|
2febc105a4 | ||
|
|
45d321da28 | ||
|
|
411df9f878 | ||
|
|
42200ec03e | ||
|
|
87f49995be | ||
|
|
e3c1afac6b | ||
|
|
d81fedb955 | ||
|
|
5fbe230636 | ||
|
|
d83c818000 | ||
|
|
2a59ff2f9b | ||
|
|
32de54fdee | ||
|
|
02130af47d | ||
|
|
4ae225a08d | ||
|
|
e26b5d63b2 | ||
|
|
abf2f661be | ||
|
|
55ee9a92a1 | ||
|
|
b38c636d05 | ||
|
|
4302fc4027 | ||
|
|
f00fd87b36 | ||
|
|
516457fadc | ||
|
|
184efff9f9 | ||
|
|
5d6baed998 | ||
|
|
1db28b8718 | ||
|
|
5480e05173 | ||
|
|
9504f411c1 | ||
|
|
ca33bf6476 | ||
|
|
133b8d94df | ||
|
|
11eaf3eed1 | ||
|
|
6d42e56c85 | ||
|
|
7a7269e983 | ||
|
|
ea99b53d8e | ||
|
|
10cd7c8447 | ||
|
|
813d2436d3 | ||
|
|
c23783a0d1 | ||
|
|
91903ac5d4 | ||
|
|
ae7e58b96e | ||
|
|
e0fd60f4e5 | ||
|
|
4b892c2b30 | ||
|
|
785094db53 | ||
|
|
9e73087324 | ||
|
|
34522d56f0 | ||
|
|
c6b5df67f6 | ||
|
|
efc4f85505 | ||
|
|
d594b11f35 | ||
|
|
87aca8c244 | ||
|
|
70d208d68c | ||
|
|
b50bc2c1d4 | ||
|
|
baef5741df | ||
|
|
5a7f7e7d49 | ||
|
|
0b7fd74138 | ||
|
|
51478a39c9 | ||
|
|
fbe9d41dd0 | ||
|
|
79d854c695 | ||
|
|
3b5a1f389a | ||
|
|
2405c59352 | ||
|
|
73140ce84c | ||
|
|
aa53e9fc8d | ||
|
|
9119f9e369 | ||
|
|
0f99cdfe0e | ||
|
|
20a9e716bd | ||
|
|
7bbb44182a | ||
|
|
9acd549dc7 | ||
|
|
42b108136f | ||
|
|
bd41bd6605 | ||
|
|
3209b42b07 | ||
|
|
7707982a85 | ||
|
|
ad3a0bbab8 | ||
|
|
d1e75d615e | ||
|
|
14a8b96476 | ||
|
|
3564b68b98 | ||
|
|
f606cb8ef4 | ||
|
|
beab6e08dd | ||
|
|
4b43810f51 | ||
|
|
5a8bbb39a1 | ||
|
|
8dac0d1009 | ||
|
|
308f664ade | ||
|
|
56e906a789 | ||
|
|
d176a0fbc8 | ||
|
|
190d888695 | ||
|
|
c87153ed32 | ||
|
|
9344f081a4 | ||
|
|
8f4acba34b | ||
|
|
9254c58e4d | ||
|
|
dee0b69674 | ||
|
|
86d88c0758 | ||
|
|
5b662cbe1c | ||
|
|
10c31ab2cb | ||
|
|
7b1427f926 | ||
|
|
72cd1517d6 | ||
|
|
58d783df16 | ||
|
|
78bea0d204 | ||
|
|
7ef2b599c7 | ||
|
|
686e990ffc | ||
|
|
60787ecebc | ||
|
|
3261002099 | ||
|
|
cb4de521c1 | ||
|
|
4ed8a88240 | ||
|
|
4912c1f9c6 | ||
|
|
57f3c2f252 | ||
|
|
24a268a2e3 | ||
|
|
b13c3a8bcc | ||
|
|
cf2d86a4f6 | ||
|
|
983cb0b374 | ||
|
|
993e62b9e7 | ||
|
|
b53a5a262c | ||
|
|
ac7fc1306b | ||
|
|
caf4a756bf | ||
|
|
7c82dc92b2 | ||
|
|
725f4c36f2 | ||
|
|
73bd590a1d | ||
|
|
9265964ee7 | ||
|
|
2c502784ff | ||
|
|
2b7a1c5780 | ||
|
|
ce0f0568a6 | ||
|
|
6288f6d563 |
32
.github/lock.yml
vendored
Normal file
32
.github/lock.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
# Configuration for lock-threads - https://github.com/dessant/lock-threads
|
||||
|
||||
# Number of days of inactivity before a closed issue or pull request is locked
|
||||
daysUntilLock: 90
|
||||
|
||||
# Issues and pull requests with these labels will not be locked. Set to `[]` to disable
|
||||
exemptLabels:
|
||||
- feature-request
|
||||
|
||||
# Label to add before locking, such as `outdated`. Set to `false` to disable
|
||||
lockLabel: false
|
||||
|
||||
# Comment to post before locking. Set to `false` to disable
|
||||
lockComment: false
|
||||
|
||||
# Assign `resolved` as the reason for locking. Set to `false` to disable
|
||||
setLockReason: true
|
||||
|
||||
# Limit to only `issues` or `pulls`
|
||||
# only: issues
|
||||
|
||||
# Optionally, specify configuration settings just for `issues` or `pulls`
|
||||
# issues:
|
||||
# exemptLabels:
|
||||
# - help-wanted
|
||||
# lockLabel: outdated
|
||||
|
||||
# pulls:
|
||||
# daysUntilLock: 30
|
||||
|
||||
# Repository to extend settings from
|
||||
# _extends: repo
|
||||
@@ -28,6 +28,8 @@ env:
|
||||
- TASK=cpp_test
|
||||
# distributed test
|
||||
- TASK=distributed_test
|
||||
# address sanitizer test
|
||||
- TASK=sanitizer_test
|
||||
|
||||
matrix:
|
||||
exclude:
|
||||
@@ -43,6 +45,8 @@ matrix:
|
||||
env: TASK=cpp_test
|
||||
- os: osx
|
||||
env: TASK=distributed_test
|
||||
- os: osx
|
||||
env: TASK=sanitizer_test
|
||||
|
||||
# dependent apt packages
|
||||
addons:
|
||||
@@ -62,6 +66,8 @@ addons:
|
||||
- graphviz
|
||||
- gcc-4.8
|
||||
- g++-4.8
|
||||
- gcc-7
|
||||
- g++-7
|
||||
|
||||
before_install:
|
||||
- source dmlc-core/scripts/travis/travis_setup_env.sh
|
||||
|
||||
@@ -9,22 +9,24 @@ msvc_use_static_runtime()
|
||||
|
||||
# Options
|
||||
option(USE_CUDA "Build with GPU acceleration")
|
||||
option(USE_AVX "Build with AVX instructions. May not produce identical results due to approximate math." OFF)
|
||||
option(USE_NCCL "Build using NCCL for multi-GPU. Also requires USE_CUDA")
|
||||
option(JVM_BINDINGS "Build JVM bindings" OFF)
|
||||
option(GOOGLE_TEST "Build google tests" OFF)
|
||||
option(R_LIB "Build shared library for R package" OFF)
|
||||
option(USE_SANITIZER "Use santizer flags" OFF)
|
||||
set(GPU_COMPUTE_VER "" CACHE STRING
|
||||
"Space separated list of compute versions to be built against, e.g. '35 61'")
|
||||
option(USE_SANITIZER "Use santizer flags" OFF)
|
||||
option(SANITIZER_PATH "Path to sanitizes.")
|
||||
set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
|
||||
"Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
|
||||
address, leak and thread.")
|
||||
|
||||
# Plugins
|
||||
option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
|
||||
option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)
|
||||
|
||||
# Deprecation warning
|
||||
if(PLUGIN_UPDATER_GPU)
|
||||
set(USE_CUDA ON)
|
||||
message(WARNING "The option 'PLUGIN_UPDATER_GPU' is deprecated. Set 'USE_CUDA' instead.")
|
||||
if(USE_AVX)
|
||||
message(WARNING "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from xgboost.")
|
||||
endif()
|
||||
|
||||
# Compiler flags
|
||||
@@ -53,16 +55,6 @@ if(USE_SANITIZER)
|
||||
enable_sanitizers("${ENABLED_SANITIZERS}")
|
||||
endif(USE_SANITIZER)
|
||||
|
||||
# AVX
|
||||
if(USE_AVX)
|
||||
if(MSVC)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX")
|
||||
else()
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
|
||||
endif()
|
||||
add_definitions(-DXGBOOST_USE_AVX)
|
||||
endif()
|
||||
|
||||
# dmlc-core
|
||||
add_subdirectory(dmlc-core)
|
||||
set(LINK_LIBRARIES dmlc rabit)
|
||||
@@ -83,6 +75,7 @@ if(R_LIB)
|
||||
)
|
||||
endif()
|
||||
|
||||
# Gather source files
|
||||
include_directories (
|
||||
${PROJECT_SOURCE_DIR}/include
|
||||
${PROJECT_SOURCE_DIR}/dmlc-core/include
|
||||
@@ -98,11 +91,22 @@ file(GLOB_RECURSE SOURCES
|
||||
# Only add main function for executable target
|
||||
list(REMOVE_ITEM SOURCES ${PROJECT_SOURCE_DIR}/src/cli_main.cc)
|
||||
|
||||
file(GLOB_RECURSE TEST_SOURCES "tests/cpp/*.cc")
|
||||
|
||||
file(GLOB_RECURSE CUDA_SOURCES
|
||||
src/*.cu
|
||||
src/*.cuh
|
||||
)
|
||||
|
||||
# Add plugins to source files
|
||||
if(PLUGIN_LZ4)
|
||||
list(APPEND SOURCES plugin/lz4/sparse_page_lz4_format.cc)
|
||||
link_libraries(lz4)
|
||||
endif()
|
||||
if(PLUGIN_DENSE_PARSER)
|
||||
list(APPEND SOURCES plugin/dense_parser/dense_libsvm.cc)
|
||||
endif()
|
||||
|
||||
# rabit
|
||||
# TODO: Create rabit cmakelists.txt
|
||||
set(RABIT_SOURCES
|
||||
@@ -185,6 +189,9 @@ if(R_LIB)
|
||||
target_link_libraries(xgboost ${LINK_LIBRARIES})
|
||||
# R uses no lib prefix in shared library names of its packages
|
||||
set_target_properties(xgboost PROPERTIES PREFIX "")
|
||||
if(APPLE)
|
||||
set_target_properties(xgboost PROPERTIES SUFFIX ".so")
|
||||
endif()
|
||||
|
||||
setup_rpackage_install_target(xgboost ${CMAKE_CURRENT_BINARY_DIR})
|
||||
# use a dummy location for any other remaining installs
|
||||
@@ -235,7 +242,6 @@ if(GOOGLE_TEST)
|
||||
enable_testing()
|
||||
find_package(GTest REQUIRED)
|
||||
|
||||
file(GLOB_RECURSE TEST_SOURCES "tests/cpp/*.cc")
|
||||
auto_source_group("${TEST_SOURCES}")
|
||||
include_directories(${GTEST_INCLUDE_DIRS})
|
||||
|
||||
|
||||
@@ -6,21 +6,30 @@ Committers
|
||||
----------
|
||||
Committers are people who have made substantial contribution to the project and granted write access to the project.
|
||||
* [Tianqi Chen](https://github.com/tqchen), University of Washington
|
||||
- Tianqi is a PhD working on large-scale machine learning, he is the creator of the project.
|
||||
- Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
|
||||
* [Tong He](https://github.com/hetong007), Amazon AI
|
||||
- Tong is an applied scientist in Amazon AI, he is the maintainer of xgboost R package.
|
||||
- Tong is an applied scientist in Amazon AI. He is the maintainer of XGBoost R package.
|
||||
* [Vadim Khotilovich](https://github.com/khotilov)
|
||||
- Vadim contributes many improvements in R and core packages.
|
||||
* [Bing Xu](https://github.com/antinucleon)
|
||||
- Bing is the original creator of xgboost python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
|
||||
- Bing is the original creator of XGBoost Python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
|
||||
* [Michael Benesty](https://github.com/pommedeterresautee)
|
||||
- Micheal is a lawyer, data scientist in France, he is the creator of xgboost interactive analysis module in R.
|
||||
* [Yuan Tang](https://github.com/terrytangyuan)
|
||||
- Yuan is a data scientist in Chicago, US. He contributed mostly in R and Python packages.
|
||||
* [Nan Zhu](https://github.com/CodingCat)
|
||||
- Nan is a software engineer in Microsoft. He contributed mostly in JVM packages.
|
||||
* [Sergei Lebedev](https://github.com/superbobry)
|
||||
- Serget is a software engineer in Criteo. He contributed mostly in JVM packages.
|
||||
- Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
|
||||
* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
|
||||
- Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
|
||||
* [Nan Zhu](https://github.com/CodingCat), Uber
|
||||
- Nan is a software engineer in Uber. He contributed mostly in JVM packages.
|
||||
* [Sergei Lebedev](https://github.com/superbobry), Criteo
|
||||
- Sergei is a software engineer in Criteo. He contributed mostly in JVM packages.
|
||||
* [Hongliang Liu](https://github.com/phunterlau)
|
||||
* [Scott Lundberg](http://scottlundberg.com/), University of Washington
|
||||
- Scott is a Ph.D. student at University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package.
|
||||
* [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
|
||||
- Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
|
||||
* [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
|
||||
- Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
|
||||
* [Jiaming](https://github.com/trivialfis)
|
||||
- Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
|
||||
|
||||
Become a Committer
|
||||
------------------
|
||||
@@ -36,28 +45,25 @@ List of Contributors
|
||||
* [Full List of Contributors](https://github.com/dmlc/xgboost/graphs/contributors)
|
||||
- To contributors: please add your name to the list when you submit a patch to the project:)
|
||||
* [Kailong Chen](https://github.com/kalenhaha)
|
||||
- Kailong is an early contributor of xgboost, he is creator of ranking objectives in xgboost.
|
||||
- Kailong is an early contributor of XGBoost, he is creator of ranking objectives in XGBoost.
|
||||
* [Skipper Seabold](https://github.com/jseabold)
|
||||
- Skipper is the major contributor to the scikit-learn module of xgboost.
|
||||
- Skipper is the major contributor to the scikit-learn module of XGBoost.
|
||||
* [Zygmunt Zając](https://github.com/zygmuntz)
|
||||
- Zygmunt is the master behind the early stopping feature frequently used by kagglers.
|
||||
* [Ajinkya Kale](https://github.com/ajkl)
|
||||
* [Boliang Chen](https://github.com/cblsjtu)
|
||||
* [Yangqing Men](https://github.com/yanqingmen)
|
||||
- Yangqing is the creator of xgboost java package.
|
||||
- Yangqing is the creator of XGBoost java package.
|
||||
* [Engpeng Yao](https://github.com/yepyao)
|
||||
* [Giulio](https://github.com/giuliohome)
|
||||
- Giulio is the creator of windows project of xgboost
|
||||
- Giulio is the creator of Windows project of XGBoost
|
||||
* [Jamie Hall](https://github.com/nerdcha)
|
||||
- Jamie is the initial creator of xgboost sklearn module.
|
||||
- Jamie is the initial creator of XGBoost scikit-learn module.
|
||||
* [Yen-Ying Lee](https://github.com/white1033)
|
||||
* [Masaaki Horikoshi](https://github.com/sinhrks)
|
||||
- Masaaki is the initial creator of xgboost python plotting module.
|
||||
* [Hongliang Liu](https://github.com/phunterlau)
|
||||
* [Hyunsu Cho](http://hyunsu-cho.io/)
|
||||
- Hyunsu is the maintainer of the XGBoost Python package. He is in charge of submitting the Python package to Python Package Index (PyPI). He is also the initial author of the CPU 'hist' updater.
|
||||
- Masaaki is the initial creator of XGBoost Python plotting module.
|
||||
* [daiyl0320](https://github.com/daiyl0320)
|
||||
- daiyl0320 contributed patch to xgboost distributed version more robust, and scales stably on TB scale datasets.
|
||||
- daiyl0320 contributed patch to XGBoost distributed version more robust, and scales stably on TB scale datasets.
|
||||
* [Huayi Zhang](https://github.com/irachex)
|
||||
* [Johan Manders](https://github.com/johanmanders)
|
||||
* [yoori](https://github.com/yoori)
|
||||
@@ -68,8 +74,6 @@ List of Contributors
|
||||
* [Alex Bain](https://github.com/convexquad)
|
||||
* [Baltazar Bieniek](https://github.com/bbieniek)
|
||||
* [Adam Pocock](https://github.com/Craigacp)
|
||||
* [Rory Mitchell](https://github.com/RAMitchell)
|
||||
- Rory is the author of the GPU plugin and also contributed the cmake build system and windows continuous integration
|
||||
* [Gideon Whitehead](https://github.com/gaw89)
|
||||
* [Yi-Lin Juang](https://github.com/frankyjuang)
|
||||
* [Andrew Hannigan](https://github.com/andrewhannigan)
|
||||
@@ -78,3 +82,7 @@ List of Contributors
|
||||
* [Pierre de Sahb](https://github.com/pdesahb)
|
||||
* [liuliang01](https://github.com/liuliang01)
|
||||
- liuliang01 added support for the qid column for LibSVM input format. This makes ranking task easier in distributed setting.
|
||||
* [Andrew Thia](https://github.com/BlueTea88)
|
||||
- Andrew Thia implemented feature interaction constraints
|
||||
* [Wei Tian](https://github.com/weitian)
|
||||
* [Chen Qin] (https://github.com/chenqin)
|
||||
|
||||
48
Jenkinsfile
vendored
48
Jenkinsfile
vendored
@@ -14,6 +14,7 @@ def dockerRun = 'tests/ci_build/ci_build.sh'
|
||||
def utils
|
||||
|
||||
def buildMatrix = [
|
||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2", "multiGpu": true],
|
||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2" ],
|
||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
|
||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": false, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
|
||||
@@ -67,22 +68,41 @@ def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
|
||||
// Destination dir for artifacts
|
||||
def distDir = "dist/${buildName}"
|
||||
def dockerArgs = ""
|
||||
if(conf["withGpu"]){
|
||||
if (conf["withGpu"]) {
|
||||
dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
|
||||
}
|
||||
def test_suite = conf["withGpu"] ? (conf["multiGpu"] ? "mgpu" : "gpu") : "cpu"
|
||||
// Build node - this is returned result
|
||||
node(nodeReq) {
|
||||
unstash name: 'srcs'
|
||||
echo """
|
||||
|===== XGBoost CMake build =====
|
||||
| dockerTarget: ${dockerTarget}
|
||||
| cmakeOpts : ${opts}
|
||||
|=========================
|
||||
""".stripMargin('|')
|
||||
// Invoke command inside docker
|
||||
sh """
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/test_${dockerTarget}.sh
|
||||
"""
|
||||
retry(3) {
|
||||
node(nodeReq) {
|
||||
unstash name: 'srcs'
|
||||
echo """
|
||||
|===== XGBoost CMake build =====
|
||||
| dockerTarget: ${dockerTarget}
|
||||
| cmakeOpts : ${opts}
|
||||
|=========================
|
||||
""".stripMargin('|')
|
||||
// Invoke command inside docker
|
||||
sh """
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/test_${test_suite}.sh
|
||||
"""
|
||||
if (!conf["multiGpu"]) {
|
||||
sh """
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
|
||||
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
|
||||
cp xgboost "${distDir}"
|
||||
cp -r python-package/dist "${distDir}/py"
|
||||
# Test the wheel for compatibility on a barebones CPU container
|
||||
${dockerRun} release ${dockerArgs} bash -c " \
|
||||
pip install --user python-package/dist/xgboost-*-none-any.whl && \
|
||||
python -m nose -v tests/python"
|
||||
# Test the wheel for compatibility on CUDA 10.0 container
|
||||
${dockerRun} gpu --build-arg CUDA_VERSION=10.0 bash -c " \
|
||||
pip install --user python-package/dist/xgboost-*-none-any.whl && \
|
||||
python -m nose -v --eval-attr='(not slow) and (not mgpu)' tests/python-gpu"
|
||||
"""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,6 +13,10 @@ def dockerRun = 'tests/ci_build/ci_build.sh'
|
||||
// Utility functions
|
||||
@Field
|
||||
def utils
|
||||
@Field
|
||||
def commit_id
|
||||
@Field
|
||||
def branch_name
|
||||
|
||||
def buildMatrix = [
|
||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2" ],
|
||||
@@ -42,27 +46,28 @@ pipeline {
|
||||
script {
|
||||
utils = load('tests/ci_build/jenkins_tools.Groovy')
|
||||
utils.checkoutSrcs()
|
||||
commit_id = "${GIT_COMMIT}"
|
||||
branch_name = "${GIT_LOCAL_BRANCH}"
|
||||
}
|
||||
stash name: 'srcs', excludes: '.git/'
|
||||
milestone label: 'Sources ready', ordinal: 1
|
||||
}
|
||||
}
|
||||
stage('Jenkins: Build doc') {
|
||||
agent {
|
||||
label 'linux && cpu && restricted'
|
||||
}
|
||||
steps {
|
||||
unstash name: 'srcs'
|
||||
script {
|
||||
def commit_id = "${GIT_COMMIT}"
|
||||
def branch_name = "${GIT_LOCAL_BRANCH}"
|
||||
echo 'Building doc...'
|
||||
dir ('jvm-packages') {
|
||||
sh "bash ./build_doc.sh ${commit_id}"
|
||||
archiveArtifacts artifacts: "${commit_id}.tar.bz2", allowEmptyArchive: true
|
||||
echo 'Deploying doc...'
|
||||
withAWS(credentials:'xgboost-doc-bucket') {
|
||||
s3Upload file: "${commit_id}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${branch_name}.tar.bz2"
|
||||
retry(3) {
|
||||
node('linux && cpu && restricted') {
|
||||
unstash name: 'srcs'
|
||||
echo 'Building doc...'
|
||||
dir ('jvm-packages') {
|
||||
sh "bash ./build_doc.sh ${commit_id}"
|
||||
archiveArtifacts artifacts: "${commit_id}.tar.bz2", allowEmptyArchive: true
|
||||
echo 'Deploying doc...'
|
||||
withAWS(credentials:'xgboost-doc-bucket') {
|
||||
s3Upload file: "${commit_id}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${branch_name}.tar.bz2"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -94,28 +99,25 @@ def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
|
||||
dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
|
||||
}
|
||||
// Build node - this is returned result
|
||||
node(nodeReq) {
|
||||
unstash name: 'srcs'
|
||||
echo """
|
||||
|===== XGBoost CMake build =====
|
||||
| dockerTarget: ${dockerTarget}
|
||||
| cmakeOpts : ${opts}
|
||||
|=========================
|
||||
""".stripMargin('|')
|
||||
// Invoke command inside docker
|
||||
sh """
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
|
||||
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
|
||||
cp xgboost "${distDir}"
|
||||
cp -r lib "${distDir}"
|
||||
cp -r python-package/dist "${distDir}/py"
|
||||
# Test the wheel for compatibility on a barebones CPU container
|
||||
${dockerRun} release ${dockerArgs} bash -c " \
|
||||
auditwheel show xgboost-*-py2-none-any.whl
|
||||
pip install --user python-package/dist/xgboost-*-none-any.whl && \
|
||||
python -m nose tests/python"
|
||||
"""
|
||||
archiveArtifacts artifacts: "${distDir}/**/*.*", allowEmptyArchive: true
|
||||
retry(3) {
|
||||
node(nodeReq) {
|
||||
unstash name: 'srcs'
|
||||
echo """
|
||||
|===== XGBoost CMake build =====
|
||||
| dockerTarget: ${dockerTarget}
|
||||
| cmakeOpts : ${opts}
|
||||
|=========================
|
||||
""".stripMargin('|')
|
||||
// Invoke command inside docker
|
||||
sh """
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
|
||||
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
|
||||
cp xgboost "${distDir}"
|
||||
cp -r lib "${distDir}"
|
||||
cp -r python-package/dist "${distDir}/py"
|
||||
"""
|
||||
archiveArtifacts artifacts: "${distDir}/**/*.*", allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
208
LICENSE
208
LICENSE
@@ -1,13 +1,201 @@
|
||||
Copyright (c) 2016 by Contributors
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
1. Definitions.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright (c) 2018 by Contributors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
170
NEWS.md
170
NEWS.md
@@ -3,6 +3,172 @@ XGBoost Change Log
|
||||
|
||||
This file records the changes in xgboost library in reverse chronological order.
|
||||
|
||||
## v0.81 (2018.11.04)
|
||||
### New feature: feature interaction constraints
|
||||
* Users are now able to control which features (independent variables) are allowed to interact by specifying feature interaction constraints (#3466).
|
||||
* [Tutorial](https://xgboost.readthedocs.io/en/release_0.81/tutorials/feature_interaction_constraint.html) is available, as well as [R](https://github.com/dmlc/xgboost/blob/9254c58e4dfff6a59dc0829a2ceb02e45ed17cd0/R-package/demo/interaction_constraints.R) and [Python](https://github.com/dmlc/xgboost/blob/9254c58e4dfff6a59dc0829a2ceb02e45ed17cd0/tests/python/test_interaction_constraints.py) examples.
|
||||
|
||||
### New feature: learning to rank using scikit-learn interface
|
||||
* Learning to rank task is now available for the scikit-learn interface of the Python package (#3560, #3848). It is now possible to integrate the XGBoost ranking model into the scikit-learn learning pipeline.
|
||||
* Examples of using the `XGBRanker` class are found at [demo/rank/rank_sklearn.py](https://github.com/dmlc/xgboost/blob/24a268a2e3cb17302db3d72da8f04016b7d352d9/demo/rank/rank_sklearn.py).
|
||||
|
||||
### New feature: R interface for SHAP interactions
|
||||
* SHAP (SHapley Additive exPlanations) is a unified approach to explain the output of any machine learning model. Previously, this feature was only available from the Python package; now it is available from the R package as well (#3636).
|
||||
|
||||
### New feature: GPU predictor now use multiple GPUs to predict
|
||||
* GPU predictor is now able to utilize multiple GPUs at once to accelerate prediction (#3738)
|
||||
|
||||
### New feature: Scale distributed XGBoost to large-scale clusters
|
||||
* Fix OS file descriptor limit assertion error on large cluster (#3835, dmlc/rabit#73) by replacing `select()` based AllReduce/Broadcast with `poll()` based implementation.
|
||||
* Mitigate tracker "thundering herd" issue on large cluster. Add exponential backoff retry when workers connect to tracker.
|
||||
* With this change, we were able to scale to 1.5k executors on a 12 billion row dataset after some tweaks here and there.
|
||||
|
||||
### New feature: Additional objective functions for GPUs
|
||||
* New objective functions ported to GPU: `hinge`, `multi:softmax`, `multi:softprob`, `count:poisson`, `reg:gamma`, `reg:tweedie`.
|
||||
* With supported objectives, XGBoost will select the correct devices based on your system and `n_gpus` parameter.
|
||||
|
||||
### Major bug fix: learning to rank with XGBoost4J-Spark
|
||||
* Previously, `repartitionForData` would shuffle data and lose ordering necessary for ranking task.
|
||||
* To fix this issue, data points within each RDD partition are explicitly grouped by their group (query session) IDs (#3654). Empty RDD partitions are also handled carefully (#3750).
|
||||
|
||||
### Major bug fix: early stopping fixed in XGBoost4J-Spark
|
||||
* Earlier implementation of early stopping had incorrect semantics and didn't let users specify the direction of optimization (maximize / minimize)
|
||||
* A parameter `maximize_evaluation_metrics` is defined so as to tell whether a metric should be maximized or minimized as part of early stopping criteria (#3808). Also early stopping now has correct semantics.
|
||||
|
||||
### API changes
|
||||
* Column sampling by level (`colsample_bylevel`) is now functional for `hist` algorithm (#3635, #3862)
|
||||
* GPU tag `gpu:` for regression objectives are now deprecated. XGBoost will select the correct devices automatically (#3643)
|
||||
* Add `disable_default_eval_metric` parameter to disable default metric (#3606)
|
||||
* Experimental AVX support for gradient computation is removed (#3752)
|
||||
* XGBoost4J-Spark
|
||||
- Add `rank:ndcg` and `rank:map` to supported objectives (#3697)
|
||||
* Python package
|
||||
- Add `callbacks` argument to `fit()` function of scikit-learn API (#3682)
|
||||
- Add `XGBRanker` to scikit-learn interface (#3560, #3848)
|
||||
- Add `validate_features` argument to `predict()` function of scikit-learn API (#3653)
|
||||
- Allow scikit-learn grid search over parameters specified as keyword arguments (#3791)
|
||||
- Add `coef_` and `intercept_` as properties of scikit-learn wrapper (#3855). Some scikit-learn functions expect these properties.
|
||||
|
||||
### Performance improvements
|
||||
* Address very high GPU memory usage for large data (#3635)
|
||||
* Fix performance regression within `EvaluateSplits()` of `gpu_hist` algorithm. (#3680)
|
||||
|
||||
### Bug-fixes
|
||||
* Fix a problem in GPU quantile sketch with tiny instance weights. (#3628)
|
||||
* Fix copy constructor for `HostDeviceVectorImpl` to prevent dangling pointers (#3657)
|
||||
* Fix a bug in partitioned file loading (#3673)
|
||||
* Fixed an uninitialized pointer in `gpu_hist` (#3703)
|
||||
* Reshared data among GPUs when number of GPUs is changed (#3721)
|
||||
* Add back `max_delta_step` to split evaluation (#3668)
|
||||
* Do not round up integer thresholds for integer features in JSON dump (#3717)
|
||||
* Use `dmlc::TemporaryDirectory` to handle temporaries in cross-platform way (#3783)
|
||||
* Fix accuracy problem with `gpu_hist` when `min_child_weight` and `lambda` are set to 0 (#3793)
|
||||
* Make sure that `tree_method` parameter is recognized and not silently ignored (#3849)
|
||||
* XGBoost4J-Spark
|
||||
- Make sure `thresholds` are considered when executing `predict()` method (#3577)
|
||||
- Avoid losing precision when computing probabilities by converting to `Double` early (#3576)
|
||||
- `getTreeLimit()` should return `Int` (#3602)
|
||||
- Fix checkpoint serialization on HDFS (#3614)
|
||||
- Throw `ControlThrowable` instead of `InterruptedException` so that it is properly re-thrown (#3632)
|
||||
- Remove extraneous output to stdout (#3665)
|
||||
- Allow specification of task type for custom objectives and evaluations (#3646)
|
||||
- Fix distributed updater check (#3739)
|
||||
- Fix issue when spark job execution thread cannot return before we execute `first()` (#3758)
|
||||
* Python package
|
||||
- Fix accessing `DMatrix.handle` before it is set (#3599)
|
||||
- `XGBClassifier.predict()` should return margin scores when `output_margin` is set to true (#3651)
|
||||
- Early stopping callback should maximize metric of form `NDCG@n-` (#3685)
|
||||
- Preserve feature names when slicing `DMatrix` (#3766)
|
||||
* R package
|
||||
- Replace `nround` with `nrounds` to match actual parameter (#3592)
|
||||
- Amend `xgb.createFolds` to handle classes of a single element (#3630)
|
||||
- Fix buggy random generator and make `colsample_bytree` functional (#3781)
|
||||
|
||||
### Maintenance: testing, continuous integration, build system
|
||||
* Add sanitizers tests to Travis CI (#3557)
|
||||
* Add NumPy, Matplotlib, Graphviz as requirements for doc build (#3669)
|
||||
* Comply with CRAN submission policy (#3660, #3728)
|
||||
* Remove copy-paste error in JVM test suite (#3692)
|
||||
* Disable flaky tests in `R-package/tests/testthat/test_update.R` (#3723)
|
||||
* Make Python tests compatible with scikit-learn 0.20 release (#3731)
|
||||
* Separate out restricted and unrestricted tasks, so that pull requests don't build downloadable artifacts (#3736)
|
||||
* Add multi-GPU unit test environment (#3741)
|
||||
* Allow plug-ins to be built by CMake (#3752)
|
||||
* Test wheel compatibility on CPU containers for pull requests (#3762)
|
||||
* Fix broken doc build due to Matplotlib 3.0 release (#3764)
|
||||
* Produce `xgboost.so` for XGBoost-R on Mac OSX, so that `make install` works (#3767)
|
||||
* Retry Jenkins CI tests up to 3 times to improve reliability (#3769, #3769, #3775, #3776, #3777)
|
||||
* Add basic unit tests for `gpu_hist` algorithm (#3785)
|
||||
* Fix Python environment for distributed unit tests (#3806)
|
||||
* Test wheels on CUDA 10.0 container for compatibility (#3838)
|
||||
* Fix JVM doc build (#3853)
|
||||
|
||||
### Maintenance: Refactor C++ code for legibility and maintainability
|
||||
* Merge generic device helper functions into `GPUSet` class (#3626)
|
||||
* Re-factor column sampling logic into `ColumnSampler` class (#3635, #3637)
|
||||
* Replace `std::vector` with `HostDeviceVector` in `MetaInfo` and `SparsePage` (#3446)
|
||||
* Simplify `DMatrix` class (#3395)
|
||||
* De-duplicate CPU/GPU code using `Transform` class (#3643, #3751)
|
||||
* Remove obsoleted `QuantileHistMaker` class (#3761)
|
||||
* Remove obsoleted `NoConstraint` class (#3792)
|
||||
|
||||
### Other Features
|
||||
* C++20-compliant Span class for safe pointer indexing (#3548, #3588)
|
||||
* Add helper functions to manipulate multiple GPU devices (#3693)
|
||||
* XGBoost4J-Spark
|
||||
- Allow specifying host ip from the `xgboost-tracker.properties` file (#3833). This comes in handy when the `hosts` file doesn't correctly define localhost.
|
||||
|
||||
### Usability Improvements
|
||||
* Add reference to GitHub repository in `pom.xml` of JVM packages (#3589)
|
||||
* Add R demo of multi-class classification (#3695)
|
||||
* Document JSON dump functionality (#3600, #3603)
|
||||
* Document CUDA requirement and lack of external memory for GPU algorithms (#3624)
|
||||
* Document LambdaMART objectives, both pairwise and listwise (#3672)
|
||||
* Document `aucpr` evaluation metric (#3687)
|
||||
* Document gblinear parameters: `feature_selector` and `top_k` (#3780)
|
||||
* Add instructions for using MinGW-built XGBoost with Python. (#3774)
|
||||
* Removed nonexistent parameter `use_buffer` from documentation (#3610)
|
||||
* Update Python API doc to include all classes and members (#3619, #3682)
|
||||
* Fix typos and broken links in documentation (#3618, #3640, #3676, #3713, #3759, #3784, #3843, #3852)
|
||||
* Binary classification demo should produce LIBSVM with 0-based indexing (#3652)
|
||||
* Process data once for Python and CLI examples of learning to rank (#3666)
|
||||
* Include full text of Apache 2.0 license in the repository (#3698)
|
||||
* Save predictor parameters in model file (#3856)
|
||||
* JVM packages
|
||||
- Let users specify feature names when calling `getModelDump` and `getFeatureScore` (#3733)
|
||||
- Warn the user about the lack of over-the-wire encryption (#3667)
|
||||
- Fix errors in examples (#3719)
|
||||
- Document choice of trackers (#3831)
|
||||
- Document that vanilla Apache Spark is required (#3854)
|
||||
* Python package
|
||||
- Document that custom objective can't contain colon (:) (#3601)
|
||||
- Show a better error message for failed library loading (#3690)
|
||||
- Document that feature importance is unavailable for non-tree learners (#3765)
|
||||
- Document behavior of `get_fscore()` for zero-importance features (#3763)
|
||||
- Recommend pickling as the way to save `XGBClassifier` / `XGBRegressor` / `XGBRanker` (#3829)
|
||||
* R package
|
||||
- Enlarge variable importance plot to make it more visible (#3820)
|
||||
|
||||
### BREAKING CHANGES
|
||||
* External memory page files have changed, breaking backwards compatibility for temporary storage used during external memory training. This only affects external memory users upgrading their xgboost version - we recommend clearing all `*.page` files before resuming training. Model serialization is unaffected.
|
||||
|
||||
### Known issues
|
||||
* Quantile sketcher fails to produce any quantile for some edge cases (#2943)
|
||||
* The `hist` algorithm leaks memory when used with learning rate decay callback (#3579)
|
||||
* Using custom evaluation function together with early stopping causes assertion failure in XGBoost4J-Spark (#3595)
|
||||
* Early stopping doesn't work with `gblinear` learner (#3789)
|
||||
* Label and weight vectors are not reshared upon the change in number of GPUs (#3794). To get around this issue, delete the `DMatrix` object and re-load.
|
||||
* The `DMatrix` Python objects are initialized with incorrect values when given array slices (#3841)
|
||||
* The `gpu_id` parameter is broken and not yet properly supported (#3850)
|
||||
|
||||
### Acknowledgement
|
||||
**Contributors** (in no particular order): Hyunsu Cho (@hcho3), Jiaming Yuan (@trivialfis), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Andy Adinets (@canonizer), Vadim Khotilovich (@khotilov), Sergei Lebedev (@superbobry)
|
||||
|
||||
**First-time Contributors** (in no particular order): Matthew Tovbin (@tovbinm), Jakob Richter (@jakob-r), Grace Lam (@grace-lam), Grant W Schneider (@grantschneider), Andrew Thia (@BlueTea88), Sergei Chipiga (@schipiga), Joseph Bradley (@jkbradley), Chen Qin (@chenqin), Jerry Lin (@linjer), Dmitriy Rybalko (@rdtft), Michael Mui (@mmui), Takahiro Kojima (@515hikaru), Bruce Zhao (@BruceZhaoR), Wei Tian (@weitian), Saumya Bhatnagar (@Sam1301), Juzer Shakir (@JuzerShakir), Zhao Hang (@cleghom), Jonathan Friedman (@jontonsoup), Bruno Tremblay (@meztez), Boris Filippov (@frenzykryger), @Shiki-H, @mrgutkun, @gorogm, @htgeis, @jakehoare, @zengxy, @KOLANICH
|
||||
|
||||
**First-time Reviewers** (in no particular order): Nikita Titov (@StrikerRUS), Xiangrui Meng (@mengxr), Nirmal Borah (@Nirmal-Neel)
|
||||
|
||||
|
||||
## v0.80 (2018.08.13)
|
||||
* **JVM packages received a major upgrade**: To consolidate the APIs and improve the user experience, we refactored the design of XGBoost4J-Spark in a significant manner. (#3387)
|
||||
- Consolidated APIs: It is now much easier to integrate XGBoost models into a Spark ML pipeline. Users can control behaviors like output leaf prediction results by setting corresponding column names. Training is now more consistent with other Estimators in Spark MLLIB: there is now one single method `fit()` to train decision trees.
|
||||
@@ -173,7 +339,7 @@ This version is only applicable for the Python package. The content is identical
|
||||
- Compatibility fix for Python 2.6
|
||||
- Call `print_evaluation` callback at last iteration
|
||||
- Use appropriate integer types when calling native code, to prevent truncation and memory error
|
||||
- Fix shared library loading on Mac OS X
|
||||
- Fix shared library loading on Mac OS X
|
||||
* R package:
|
||||
- New parameters:
|
||||
- `silent` in `xgb.DMatrix()`
|
||||
@@ -214,7 +380,7 @@ This version is only applicable for the Python package. The content is identical
|
||||
- Support instance weights
|
||||
- Use `SparkParallelismTracker` to prevent jobs from hanging forever
|
||||
- Expose train-time evaluation metrics via `XGBoostModel.summary`
|
||||
- Option to specify `host-ip` explicitly in the Rabit tracker
|
||||
- Option to specify `host-ip` explicitly in the Rabit tracker
|
||||
* Documentation
|
||||
- Better math notation for gradient boosting
|
||||
- Updated build instructions for Mac OS X
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
Package: xgboost
|
||||
Type: Package
|
||||
Title: Extreme Gradient Boosting
|
||||
Version: 0.80.1
|
||||
Version: 0.81.0.1
|
||||
Date: 2018-08-13
|
||||
Authors@R: c(
|
||||
person("Tianqi", "Chen", role = c("aut"),
|
||||
@@ -61,5 +61,5 @@ Imports:
|
||||
data.table (>= 1.9.6),
|
||||
magrittr (>= 1.5),
|
||||
stringi (>= 0.5.2)
|
||||
RoxygenNote: 6.0.1
|
||||
RoxygenNote: 6.1.0
|
||||
SystemRequirements: GNU make, C++11
|
||||
|
||||
@@ -74,6 +74,19 @@ check.booster.params <- function(params, ...) {
|
||||
params[['monotone_constraints']] = vec2str
|
||||
}
|
||||
|
||||
# interaction constraints parser (convert from list of column indices to string)
|
||||
if (!is.null(params[['interaction_constraints']]) &&
|
||||
typeof(params[['interaction_constraints']]) != "character"){
|
||||
# check input class
|
||||
if (class(params[['interaction_constraints']]) != 'list') stop('interaction_constraints should be class list')
|
||||
if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric','integer'))) {
|
||||
stop('interaction_constraints should be a list of numeric/integer vectors')
|
||||
}
|
||||
|
||||
# recast parameter as string
|
||||
interaction_constraints <- sapply(params[['interaction_constraints']], function(x) paste0('[', paste(x, collapse=','), ']'))
|
||||
params[['interaction_constraints']] <- paste0('[', paste(interaction_constraints, collapse=','), ']')
|
||||
}
|
||||
return(params)
|
||||
}
|
||||
|
||||
@@ -262,7 +275,8 @@ xgb.createFolds <- function(y, k = 10)
|
||||
## add enough random integers to get length(seqVector) == numInClass[i]
|
||||
if (numInClass[i] %% k > 0) seqVector <- c(seqVector, sample.int(k, numInClass[i] %% k))
|
||||
## shuffle the integers for fold assignment and assign to this classes's data
|
||||
foldVector[y == dimnames(numInClass)$y[i]] <- sample(seqVector)
|
||||
## seqVector[sample.int(length(seqVector))] is used to handle length(seqVector) == 1
|
||||
foldVector[y == dimnames(numInClass)$y[i]] <- seqVector[sample.int(length(seqVector))]
|
||||
}
|
||||
} else {
|
||||
foldVector <- seq(along = y)
|
||||
|
||||
@@ -129,11 +129,13 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
#' logistic regression would result in predictions for log-odds instead of probabilities.
|
||||
#' @param ntreelimit limit the number of model's trees or boosting iterations used in prediction (see Details).
|
||||
#' It will use all the trees by default (\code{NULL} value).
|
||||
#' @param predleaf whether predict leaf index instead.
|
||||
#' @param predcontrib whether to return feature contributions to individual predictions instead (see Details).
|
||||
#' @param predleaf whether predict leaf index.
|
||||
#' @param predcontrib whether to return feature contributions to individual predictions (see Details).
|
||||
#' @param approxcontrib whether to use a fast approximation for feature contributions (see Details).
|
||||
#' @param predinteraction whether to return contributions of feature interactions to individual predictions (see Details).
|
||||
#' @param reshape whether to reshape the vector of predictions to a matrix form when there are several
|
||||
#' prediction outputs per case. This option has no effect when \code{predleaf = TRUE}.
|
||||
#' prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
|
||||
#' or predinteraction flags is TRUE.
|
||||
#' @param ... Parameters passed to \code{predict.xgb.Booster}
|
||||
#'
|
||||
#' @details
|
||||
@@ -158,6 +160,11 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
#' Setting \code{approxcontrib = TRUE} approximates these values following the idea explained
|
||||
#' in \url{http://blog.datadive.net/interpreting-random-forests/}.
|
||||
#'
|
||||
#' With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
|
||||
#' are computed. Note that this operation might be rather expensive in terms of compute and memory.
|
||||
#' Since it quadratically depends on the number of features, it is recommended to perform selection
|
||||
#' of the most important features first. See below about the format of the returned results.
|
||||
#'
|
||||
#' @return
|
||||
#' For regression or binary classification, it returns a vector of length \code{nrows(newdata)}.
|
||||
#' For multiclass classification, either a \code{num_class * nrows(newdata)} vector or
|
||||
@@ -173,6 +180,14 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
#' such a matrix. The contribution values are on the scale of untransformed margin
|
||||
#' (e.g., for binary classification would mean that the contributions are log-odds deviations from bias).
|
||||
#'
|
||||
#' When \code{predinteraction = TRUE} and it is not a multiclass setting, the output is a 3d array with
|
||||
#' dimensions \code{c(nrow, num_features + 1, num_features + 1)}. The off-diagonal (in the last two dimensions)
|
||||
#' elements represent different features interaction contributions. The array is symmetric WRT the last
|
||||
#' two dimensions. The "+ 1" columns corresponds to bias. Summing this array along the last dimension should
|
||||
#' produce practically the same result as predict with \code{predcontrib = TRUE}.
|
||||
#' For a multiclass case, a list of \code{num_class} elements is returned, where each element is
|
||||
#' such an array.
|
||||
#'
|
||||
#' @seealso
|
||||
#' \code{\link{xgb.train}}.
|
||||
#'
|
||||
@@ -269,7 +284,8 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
#' @rdname predict.xgb.Booster
|
||||
#' @export
|
||||
predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FALSE, ntreelimit = NULL,
|
||||
predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, reshape = FALSE, ...) {
|
||||
predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
|
||||
reshape = FALSE, ...) {
|
||||
|
||||
object <- xgb.Booster.complete(object, saveraw = FALSE)
|
||||
if (!inherits(newdata, "xgb.DMatrix"))
|
||||
@@ -285,7 +301,8 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
|
||||
if (ntreelimit < 0)
|
||||
stop("ntreelimit cannot be negative")
|
||||
|
||||
option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + 4L * as.logical(predcontrib) + 8L * as.logical(approxcontrib)
|
||||
option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + 4L * as.logical(predcontrib) +
|
||||
8L * as.logical(approxcontrib) + 16L * as.logical(predinteraction)
|
||||
|
||||
ret <- .Call(XGBoosterPredict_R, object$handle, newdata, option[1], as.integer(ntreelimit))
|
||||
|
||||
@@ -305,17 +322,28 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
|
||||
} else if (predcontrib) {
|
||||
n_col1 <- ncol(newdata) + 1
|
||||
n_group <- npred_per_case / n_col1
|
||||
dnames <- if (!is.null(colnames(newdata))) list(NULL, c(colnames(newdata), "BIAS")) else NULL
|
||||
cnames <- if (!is.null(colnames(newdata))) c(colnames(newdata), "BIAS") else NULL
|
||||
ret <- if (n_ret == n_row) {
|
||||
matrix(ret, ncol = 1, dimnames = dnames)
|
||||
matrix(ret, ncol = 1, dimnames = list(NULL, cnames))
|
||||
} else if (n_group == 1) {
|
||||
matrix(ret, nrow = n_row, byrow = TRUE, dimnames = dnames)
|
||||
matrix(ret, nrow = n_row, byrow = TRUE, dimnames = list(NULL, cnames))
|
||||
} else {
|
||||
grp_mask <- rep(seq_len(n_col1), n_row) +
|
||||
rep((seq_len(n_row) - 1) * n_col1 * n_group, each = n_col1)
|
||||
lapply(seq_len(n_group), function(g) {
|
||||
matrix(ret[grp_mask + n_col1 * (g - 1)], nrow = n_row, byrow = TRUE, dimnames = dnames)
|
||||
})
|
||||
arr <- array(ret, c(n_col1, n_group, n_row),
|
||||
dimnames = list(cnames, NULL, NULL)) %>% aperm(c(2,3,1)) # [group, row, col]
|
||||
lapply(seq_len(n_group), function(g) arr[g,,])
|
||||
}
|
||||
} else if (predinteraction) {
|
||||
n_col1 <- ncol(newdata) + 1
|
||||
n_group <- npred_per_case / n_col1^2
|
||||
cnames <- if (!is.null(colnames(newdata))) c(colnames(newdata), "BIAS") else NULL
|
||||
ret <- if (n_ret == n_row) {
|
||||
matrix(ret, ncol = 1, dimnames = list(NULL, cnames))
|
||||
} else if (n_group == 1) {
|
||||
array(ret, c(n_col1, n_col1, n_row), dimnames = list(cnames, cnames, NULL)) %>% aperm(c(3,1,2))
|
||||
} else {
|
||||
arr <- array(ret, c(n_col1, n_col1, n_group, n_row),
|
||||
dimnames = list(cnames, cnames, NULL, NULL)) %>% aperm(c(3,4,1,2)) # [group, row, col1, col2]
|
||||
lapply(seq_len(n_group), function(g) arr[g,,,])
|
||||
}
|
||||
} else if (reshape && npred_per_case > 1) {
|
||||
ret <- matrix(ret, nrow = n_row, byrow = TRUE)
|
||||
|
||||
@@ -22,7 +22,7 @@ xgb.ggplot.importance <- function(importance_matrix = NULL, top_n = NULL, measur
|
||||
|
||||
plot <-
|
||||
ggplot2::ggplot(importance_matrix,
|
||||
ggplot2::aes(x = factor(Feature, levels = rev(Feature)), y = Importance, width = 0.05),
|
||||
ggplot2::aes(x = factor(Feature, levels = rev(Feature)), y = Importance, width = 0.5),
|
||||
environment = environment()) +
|
||||
ggplot2::geom_bar(ggplot2::aes(fill = Cluster), stat = "identity", position = "identity") +
|
||||
ggplot2::coord_flip() +
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
|
||||
#' \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
|
||||
#' \item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
|
||||
#' \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
|
||||
#' }
|
||||
#'
|
||||
#' 2.2. Parameter for Linear Booster
|
||||
|
||||
@@ -11,4 +11,5 @@ early_stopping Early Stop in training
|
||||
poisson_regression Poisson Regression on count data
|
||||
tweedie_regression Tweedie Regression
|
||||
gpu_accelerated GPU-accelerated tree building algorithms
|
||||
interaction_constraints Interaction constraints among features
|
||||
|
||||
|
||||
105
R-package/demo/interaction_constraints.R
Normal file
105
R-package/demo/interaction_constraints.R
Normal file
@@ -0,0 +1,105 @@
|
||||
library(xgboost)
|
||||
library(data.table)
|
||||
|
||||
set.seed(1024)
|
||||
|
||||
# Function to obtain a list of interactions fitted in trees, requires input of maximum depth
|
||||
# Extract the list of feature interactions fitted in a tree ensemble.
#
# Args:
#   input_tree:      data.table as produced by xgb.model.dt.tree() (columns
#                    ID, Feature, Split, Yes, No are read here).
#   input_max_depth: maximum tree depth used during training; bounds how many
#                    ancestor levels must be attached to each node.
#
# Returns: a list of character vectors, each a unique, sorted combination of
#   feature names that co-occur on some root-to-node path (i.e. interact).
treeInteractions <- function(input_tree, input_max_depth){
  trees <- copy(input_tree)  # copy so the := updates below don't modify the caller's table by reference
  if (input_max_depth < 2) return(list())  # depth-1 trees cannot contain interactions
  if (nrow(trees) == 1) return(list())     # single-node (stump) model: no splits at all

  # Attach parent node IDs and features, one ancestor level per iteration:
  # after iteration i, columns parent_(i-1) / parent_feat_(i-1) are populated.
  for (i in 2:input_max_depth){
    if (i == 2) trees[, ID_merge:=ID] else trees[, ID_merge:=get(paste0('parent_',i-2))]
    parents_left <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=Yes)]
    parents_right <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=No)]

    setorderv(trees, 'ID_merge')
    setorderv(parents_left, 'ID_merge')
    setorderv(parents_right, 'ID_merge')

    # Left children: record their parent's ID and splitting feature.
    trees <- merge(trees, parents_left, by='ID_merge', all.x=TRUE)
    trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
    trees[, c('i.id','i.feature'):=NULL]

    # Right children: same, filling in the rows left NA by the merge above.
    trees <- merge(trees, parents_right, by='ID_merge', all.x=TRUE)
    trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
    trees[, c('i.id','i.feature'):=NULL]
  }

  # Keep only split nodes that have at least one recorded ancestor.
  interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
                             c('Feature',paste0('parent_feat_',1:(input_max_depth-1))), with=FALSE]
  # Guard the empty case: split(x, 1:0) would error on a zero-row table.
  if (nrow(interaction_trees) == 0) return(list())
  interaction_trees_split <- split(interaction_trees, seq_len(nrow(interaction_trees)))
  interaction_list <- lapply(interaction_trees_split, as.character)

  # Remove NAs (paths shorter than max depth have missing ancestor slots).
  interaction_list <- lapply(interaction_list, function(x) x[!is.na(x)])

  # Remove non-interactions (same variable repeated along a path), then
  # drop singletons and deduplicate the remaining feature combinations.
  interaction_list <- lapply(interaction_list, unique) # remove same variables
  interaction_length <- sapply(interaction_list, length)
  interaction_list <- interaction_list[interaction_length > 1]
  interaction_list <- unique(lapply(interaction_list, sort))
  return(interaction_list)
}
|
||||
|
||||
# Generate sample data
|
||||
x <- list()
|
||||
for (i in 1:10){
|
||||
x[[i]] = i*rnorm(1000, 10)
|
||||
}
|
||||
x <- as.data.table(x)
|
||||
|
||||
y = -1*x[, rowSums(.SD)] + x[['V1']]*x[['V2']] + x[['V3']]*x[['V4']]*x[['V5']] + rnorm(1000, 0.001) + 3*sin(x[['V7']])
|
||||
|
||||
train = as.matrix(x)
|
||||
|
||||
# Interaction constraint list (column names form)
|
||||
interaction_list <- list(c('V1','V2'),c('V3','V4','V5'))
|
||||
|
||||
# Convert interaction constraint list into feature index form
|
||||
# Recursively translate character feature names into 0-based column indices,
# preserving the nesting structure of the input list.
cols2ids <- function(object, col_names) {
  # Named lookup vector: feature name -> 0-based position in col_names.
  index_of <- setNames(seq_along(col_names) - 1, col_names)
  # Replace every character leaf with its index; non-character leaves untouched.
  rapply(object, function(nm) index_of[nm], classes = "character", how = "replace")
}
|
||||
interaction_list_fid = cols2ids(interaction_list, colnames(train))
|
||||
|
||||
# Fit model with interaction constraints
|
||||
bst = xgboost(data = train, label = y, max_depth = 4,
|
||||
eta = 0.1, nthread = 2, nrounds = 1000,
|
||||
interaction_constraints = interaction_list_fid)
|
||||
|
||||
bst_tree <- xgb.model.dt.tree(colnames(train), bst)
|
||||
bst_interactions <- treeInteractions(bst_tree, 4) # interactions constrained to combinations of V1*V2 and V3*V4*V5
|
||||
|
||||
# Fit model without interaction constraints
|
||||
bst2 = xgboost(data = train, label = y, max_depth = 4,
|
||||
eta = 0.1, nthread = 2, nrounds = 1000)
|
||||
|
||||
bst2_tree <- xgb.model.dt.tree(colnames(train), bst2)
|
||||
bst2_interactions <- treeInteractions(bst2_tree, 4) # much more interactions
|
||||
|
||||
# Fit model with both interaction and monotonicity constraints
|
||||
bst3 = xgboost(data = train, label = y, max_depth = 4,
|
||||
eta = 0.1, nthread = 2, nrounds = 1000,
|
||||
interaction_constraints = interaction_list_fid,
|
||||
monotone_constraints = c(-1,0,0,0,0,0,0,0,0,0))
|
||||
|
||||
bst3_tree <- xgb.model.dt.tree(colnames(train), bst3)
|
||||
bst3_interactions <- treeInteractions(bst3_tree, 4) # interactions still constrained to combinations of V1*V2 and V3*V4*V5
|
||||
|
||||
# Show monotonic constraints still apply by checking scores after incrementing V1
|
||||
x1 <- sort(unique(x[['V1']]))
|
||||
for (i in 1:length(x1)){
|
||||
testdata <- copy(x[, -c('V1')])
|
||||
testdata[['V1']] <- x1[i]
|
||||
testdata <- testdata[, paste0('V',1:10), with=F]
|
||||
pred <- predict(bst3, as.matrix(testdata))
|
||||
|
||||
# Should not print out anything due to monotonic constraints
|
||||
if (i > 1) if (any(pred > prev_pred)) print(i)
|
||||
prev_pred <- pred
|
||||
}
|
||||
@@ -7,7 +7,8 @@
|
||||
\usage{
|
||||
\method{predict}{xgb.Booster}(object, newdata, missing = NA,
|
||||
outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE,
|
||||
predcontrib = FALSE, approxcontrib = FALSE, reshape = FALSE, ...)
|
||||
predcontrib = FALSE, approxcontrib = FALSE,
|
||||
predinteraction = FALSE, reshape = FALSE, ...)
|
||||
|
||||
\method{predict}{xgb.Booster.handle}(object, ...)
|
||||
}
|
||||
@@ -26,14 +27,17 @@ logistic regression would result in predictions for log-odds instead of probabil
|
||||
\item{ntreelimit}{limit the number of model's trees or boosting iterations used in prediction (see Details).
|
||||
It will use all the trees by default (\code{NULL} value).}
|
||||
|
||||
\item{predleaf}{whether predict leaf index instead.}
|
||||
\item{predleaf}{whether predict leaf index.}
|
||||
|
||||
\item{predcontrib}{whether to return feature contributions to individual predictions instead (see Details).}
|
||||
\item{predcontrib}{whether to return feature contributions to individual predictions (see Details).}
|
||||
|
||||
\item{approxcontrib}{whether to use a fast approximation for feature contributions (see Details).}
|
||||
|
||||
\item{predinteraction}{whether to return contributions of feature interactions to individual predictions (see Details).}
|
||||
|
||||
\item{reshape}{whether to reshape the vector of predictions to a matrix form when there are several
|
||||
prediction outputs per case. This option has no effect when \code{predleaf = TRUE}.}
|
||||
prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
|
||||
or predinteraction flags is TRUE.}
|
||||
|
||||
\item{...}{Parameters passed to \code{predict.xgb.Booster}}
|
||||
}
|
||||
@@ -51,6 +55,14 @@ When \code{predcontrib = TRUE} and it is not a multiclass setting, the output is
|
||||
For a multiclass case, a list of \code{num_class} elements is returned, where each element is
|
||||
such a matrix. The contribution values are on the scale of untransformed margin
|
||||
(e.g., for binary classification would mean that the contributions are log-odds deviations from bias).
|
||||
|
||||
When \code{predinteraction = TRUE} and it is not a multiclass setting, the output is a 3d array with
|
||||
dimensions \code{c(nrow, num_features + 1, num_features + 1)}. The off-diagonal (in the last two dimensions)
|
||||
elements represent different features interaction contributions. The array is symmetric WRT the last
|
||||
two dimensions. The "+ 1" columns corresponds to bias. Summing this array along the last dimension should
|
||||
produce practically the same result as predict with \code{predcontrib = TRUE}.
|
||||
For a multiclass case, a list of \code{num_class} elements is returned, where each element is
|
||||
such an array.
|
||||
}
|
||||
\description{
|
||||
Predicted values based on either xgboost model or model handle object.
|
||||
@@ -76,6 +88,11 @@ values (Lundberg 2017) that sum to the difference between the expected output
|
||||
of the model and the current prediction (where the hessian weights are used to compute the expectations).
|
||||
Setting \code{approxcontrib = TRUE} approximates these values following the idea explained
|
||||
in \url{http://blog.datadive.net/interpreting-random-forests/}.
|
||||
|
||||
With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
|
||||
are computed. Note that this operation might be rather expensive in terms of compute and memory.
|
||||
Since it quadratically depends on the number of features, it is recommended to perfom selection
|
||||
of the most important features first. See below about the format of the returned results.
|
||||
}
|
||||
\examples{
|
||||
## binary classification:
|
||||
|
||||
@@ -4,11 +4,12 @@
|
||||
\alias{xgb.cv}
|
||||
\title{Cross Validation}
|
||||
\usage{
|
||||
xgb.cv(params = list(), data, nrounds, nfold, label = NULL, missing = NA,
|
||||
prediction = FALSE, showsd = TRUE, metrics = list(), obj = NULL,
|
||||
feval = NULL, stratified = TRUE, folds = NULL, verbose = TRUE,
|
||||
print_every_n = 1L, early_stopping_rounds = NULL, maximize = NULL,
|
||||
callbacks = list(), ...)
|
||||
xgb.cv(params = list(), data, nrounds, nfold, label = NULL,
|
||||
missing = NA, prediction = FALSE, showsd = TRUE,
|
||||
metrics = list(), obj = NULL, feval = NULL, stratified = TRUE,
|
||||
folds = NULL, verbose = TRUE, print_every_n = 1L,
|
||||
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(),
|
||||
...)
|
||||
}
|
||||
\arguments{
|
||||
\item{params}{the list of parameters. Commonly used ones are:
|
||||
|
||||
@@ -44,8 +44,8 @@ test <- agaricus.test
|
||||
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
|
||||
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
|
||||
# save the model in file 'xgb.model.dump'
|
||||
dump.path = file.path(tempdir(), 'model.dump')
|
||||
xgb.dump(bst, dump.path, with_stats = TRUE)
|
||||
dump_path = file.path(tempdir(), 'model.dump')
|
||||
xgb.dump(bst, dump_path, with_stats = TRUE)
|
||||
|
||||
# print the model without saving it to a file
|
||||
print(xgb.dump(bst, with_stats = TRUE))
|
||||
|
||||
@@ -5,11 +5,11 @@
|
||||
\alias{xgb.plot.deepness}
|
||||
\title{Plot model trees deepness}
|
||||
\usage{
|
||||
xgb.ggplot.deepness(model = NULL, which = c("2x1", "max.depth", "med.depth",
|
||||
"med.weight"))
|
||||
xgb.ggplot.deepness(model = NULL, which = c("2x1", "max.depth",
|
||||
"med.depth", "med.weight"))
|
||||
|
||||
xgb.plot.deepness(model = NULL, which = c("2x1", "max.depth", "med.depth",
|
||||
"med.weight"), plot = TRUE, ...)
|
||||
xgb.plot.deepness(model = NULL, which = c("2x1", "max.depth",
|
||||
"med.depth", "med.weight"), plot = TRUE, ...)
|
||||
}
|
||||
\arguments{
|
||||
\item{model}{either an \code{xgb.Booster} model generated by the \code{xgb.train} function
|
||||
|
||||
@@ -9,8 +9,8 @@ xgb.ggplot.importance(importance_matrix = NULL, top_n = NULL,
|
||||
measure = NULL, rel_to_first = FALSE, n_clusters = c(1:10), ...)
|
||||
|
||||
xgb.plot.importance(importance_matrix = NULL, top_n = NULL,
|
||||
measure = NULL, rel_to_first = FALSE, left_margin = 10, cex = NULL,
|
||||
plot = TRUE, ...)
|
||||
measure = NULL, rel_to_first = FALSE, left_margin = 10,
|
||||
cex = NULL, plot = TRUE, ...)
|
||||
}
|
||||
\arguments{
|
||||
\item{importance_matrix}{a \code{data.table} returned by \code{\link{xgb.importance}}.}
|
||||
|
||||
@@ -6,8 +6,8 @@
|
||||
\usage{
|
||||
xgb.plot.shap(data, shap_contrib = NULL, features = NULL, top_n = 1,
|
||||
model = NULL, trees = NULL, target_class = NULL,
|
||||
approxcontrib = FALSE, subsample = NULL, n_col = 1, col = rgb(0, 0, 1,
|
||||
0.2), pch = ".", discrete_n_uniq = 5, discrete_jitter = 0.01,
|
||||
approxcontrib = FALSE, subsample = NULL, n_col = 1, col = rgb(0,
|
||||
0, 1, 0.2), pch = ".", discrete_n_uniq = 5, discrete_jitter = 0.01,
|
||||
ylab = "SHAP", plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6),
|
||||
pch_NA = ".", pos_NA = 1.07, plot_loess = TRUE, col_loess = 2,
|
||||
span_loess = 0.5, which = c("1d", "2d"), plot = TRUE, ...)
|
||||
|
||||
@@ -5,15 +5,17 @@
|
||||
\alias{xgboost}
|
||||
\title{eXtreme Gradient Boosting Training}
|
||||
\usage{
|
||||
xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL,
|
||||
feval = NULL, verbose = 1, print_every_n = 1L,
|
||||
xgb.train(params = list(), data, nrounds, watchlist = list(),
|
||||
obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L,
|
||||
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
|
||||
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(), ...)
|
||||
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(),
|
||||
...)
|
||||
|
||||
xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
|
||||
params = list(), nrounds, verbose = 1, print_every_n = 1L,
|
||||
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
|
||||
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(), ...)
|
||||
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(),
|
||||
...)
|
||||
}
|
||||
\arguments{
|
||||
\item{params}{the list of parameters.
|
||||
|
||||
@@ -223,3 +223,42 @@ test_that("train and predict with non-strict classes", {
|
||||
expect_error(pr <- predict(bst, train_dense), regexp = NA)
|
||||
expect_equal(pr0, pr)
|
||||
})
|
||||
|
||||
test_that("max_delta_step works", {
|
||||
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
|
||||
watchlist <- list(train = dtrain)
|
||||
param <- list(objective = "binary:logistic", eval_metric="logloss", max_depth = 2, nthread = 2, eta = 0.5)
|
||||
nrounds = 5
|
||||
# model with no restriction on max_delta_step
|
||||
bst1 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1)
|
||||
# model with restricted max_delta_step
|
||||
bst2 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1, max_delta_step = 1)
|
||||
# the no-restriction model is expected to have consistently lower loss during the initial interations
|
||||
expect_true(all(bst1$evaluation_log$train_logloss < bst2$evaluation_log$train_logloss))
|
||||
expect_lt(mean(bst1$evaluation_log$train_logloss)/mean(bst2$evaluation_log$train_logloss), 0.8)
|
||||
})
|
||||
|
||||
test_that("colsample_bytree works", {
|
||||
# Randomly generate data matrix by sampling from uniform distribution [-1, 1]
|
||||
set.seed(1)
|
||||
train_x <- matrix(runif(1000, min = -1, max = 1), ncol = 100)
|
||||
train_y <- as.numeric(rowSums(train_x) > 0)
|
||||
test_x <- matrix(runif(1000, min = -1, max = 1), ncol = 100)
|
||||
test_y <- as.numeric(rowSums(test_x) > 0)
|
||||
colnames(train_x) <- paste0("Feature_", sprintf("%03d", 1:100))
|
||||
colnames(test_x) <- paste0("Feature_", sprintf("%03d", 1:100))
|
||||
dtrain <- xgb.DMatrix(train_x, label = train_y)
|
||||
dtest <- xgb.DMatrix(test_x, label = test_y)
|
||||
watchlist <- list(train = dtrain, eval = dtest)
|
||||
# Use colsample_bytree = 0.01, so that roughly one out of 100 features is
|
||||
# chosen for each tree
|
||||
param <- list(max_depth = 2, eta = 0, silent = 1, nthread = 2,
|
||||
colsample_bytree = 0.01, objective = "binary:logistic",
|
||||
eval_metric = "auc")
|
||||
set.seed(2)
|
||||
bst <- xgb.train(param, dtrain, nrounds = 100, watchlist, verbose = 0)
|
||||
xgb.importance(model = bst)
|
||||
# If colsample_bytree works properly, a variety of features should be used
|
||||
# in the 100 trees
|
||||
expect_gte(nrow(xgb.importance(model = bst)), 30)
|
||||
})
|
||||
|
||||
38
R-package/tests/testthat/test_interaction_constraints.R
Normal file
38
R-package/tests/testthat/test_interaction_constraints.R
Normal file
@@ -0,0 +1,38 @@
|
||||
require(xgboost)
|
||||
|
||||
context("interaction constraints")
|
||||
|
||||
set.seed(1024)
|
||||
x1 <- rnorm(1000, 1)
|
||||
x2 <- rnorm(1000, 1)
|
||||
x3 <- sample(c(1,2,3), size=1000, replace=TRUE)
|
||||
y <- x1 + x2 + x3 + x1*x2*x3 + rnorm(1000, 0.001) + 3*sin(x1)
|
||||
train <- matrix(c(x1,x2,x3), ncol = 3)
|
||||
|
||||
test_that("interaction constraints for regression", {
|
||||
# Fit a model that only allows interaction between x1 and x2
|
||||
bst <- xgboost(data = train, label = y, max_depth = 3,
|
||||
eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
|
||||
interaction_constraints = list(c(0,1)))
|
||||
|
||||
# Set all observations to have the same x3 values then increment
|
||||
# by the same amount
|
||||
preds <- lapply(c(1,2,3), function(x){
|
||||
tmat <- matrix(c(x1,x2,rep(x,1000)), ncol=3)
|
||||
return(predict(bst, tmat))
|
||||
})
|
||||
|
||||
# Check incrementing x3 has the same effect on all observations
|
||||
# since x3 is constrained to be independent of x1 and x2
|
||||
# and all observations start off from the same x3 value
|
||||
diff1 <- preds[[2]] - preds[[1]]
|
||||
test1 <- all(abs(diff1 - diff1[1]) < 1e-4)
|
||||
|
||||
diff2 <- preds[[3]] - preds[[2]]
|
||||
test2 <- all(abs(diff2 - diff2[1]) < 1e-4)
|
||||
|
||||
expect_true({
|
||||
test1 & test2
|
||||
}, "Interaction Contraint Satisfied")
|
||||
|
||||
})
|
||||
108
R-package/tests/testthat/test_interactions.R
Normal file
108
R-package/tests/testthat/test_interactions.R
Normal file
@@ -0,0 +1,108 @@
|
||||
context('Test prediction of feature interactions')
|
||||
|
||||
require(xgboost)
|
||||
require(magrittr)
|
||||
|
||||
set.seed(123)
|
||||
|
||||
test_that("predict feature interactions works", {
|
||||
# simulate some binary data and a linear outcome with an interaction term
|
||||
N <- 1000
|
||||
P <- 5
|
||||
X <- matrix(rbinom(N * P, 1, 0.5), ncol=P, dimnames = list(NULL, letters[1:P]))
|
||||
# center the data (as contributions are computed WRT feature means)
|
||||
X <- scale(X, scale=FALSE)
|
||||
|
||||
# outcome without any interactions, without any noise:
|
||||
f <- function(x) 2 * x[, 1] - 3 * x[, 2]
|
||||
# outcome with interactions, without noise:
|
||||
f_int <- function(x) f(x) + 2 * x[, 2] * x[, 3]
|
||||
# outcome with interactions, with noise:
|
||||
#f_int_noise <- function(x) f_int(x) + rnorm(N, 0, 0.3)
|
||||
|
||||
y <- f_int(X)
|
||||
|
||||
dm <- xgb.DMatrix(X, label = y)
|
||||
param <- list(eta=0.1, max_depth=4, base_score=mean(y), lambda=0, nthread=2)
|
||||
b <- xgb.train(param, dm, 100)
|
||||
|
||||
pred = predict(b, dm, outputmargin=TRUE)
|
||||
|
||||
# SHAP contributions:
|
||||
cont <- predict(b, dm, predcontrib=TRUE)
|
||||
expect_equal(dim(cont), c(N, P+1))
|
||||
# make sure for each row they add up to marginal predictions
|
||||
max(abs(rowSums(cont) - pred)) %>% expect_lt(0.001)
|
||||
# Hand-construct the 'ground truth' feature contributions:
|
||||
gt_cont <- cbind(
|
||||
2. * X[, 1],
|
||||
-3. * X[, 2] + 1. * X[, 2] * X[, 3], # attribute a HALF of the interaction term to feature #2
|
||||
1. * X[, 2] * X[, 3] # and another HALF of the interaction term to feature #3
|
||||
)
|
||||
gt_cont <- cbind(gt_cont, matrix(0, nrow=N, ncol=P + 1 - 3))
|
||||
# These should be relatively close:
|
||||
expect_lt(max(abs(cont - gt_cont)), 0.05)
|
||||
|
||||
|
||||
# SHAP interaction contributions:
|
||||
intr <- predict(b, dm, predinteraction=TRUE)
|
||||
expect_equal(dim(intr), c(N, P+1, P+1))
|
||||
# check assigned colnames
|
||||
cn <- c(letters[1:P], "BIAS")
|
||||
expect_equal(dimnames(intr), list(NULL, cn, cn))
|
||||
|
||||
# check the symmetry
|
||||
max(abs(aperm(intr, c(1,3,2)) - intr)) %>% expect_lt(0.00001)
|
||||
|
||||
# sums WRT columns must be close to feature contributions
|
||||
max(abs(apply(intr, c(1,2), sum) - cont)) %>% expect_lt(0.00001)
|
||||
|
||||
# diagonal terms for features 3,4,5 must be close to zero
|
||||
Reduce(max, sapply(3:P, function(i) max(abs(intr[, i, i])))) %>% expect_lt(0.05)
|
||||
|
||||
# BIAS must have no interactions
|
||||
max(abs(intr[, 1:P, P+1])) %>% expect_lt(0.00001)
|
||||
|
||||
# interactions other than 2 x 3 must be close to zero
|
||||
intr23 <- intr
|
||||
intr23[,2,3] <- 0
|
||||
Reduce(max, sapply(1:P, function(i) max(abs(intr23[, i, (i+1):(P+1)])))) %>% expect_lt(0.05)
|
||||
|
||||
# Construct the 'ground truth' contributions of interactions directly from the linear terms:
|
||||
gt_intr <- array(0, c(N, P+1, P+1))
|
||||
gt_intr[,2,3] <- 1. * X[, 2] * X[, 3] # attribute a HALF of the interaction term to each symmetric element
|
||||
gt_intr[,3,2] <- gt_intr[, 2, 3]
|
||||
# merge-in the diagonal based on 'ground truth' feature contributions
|
||||
intr_diag = gt_cont - apply(gt_intr, c(1,2), sum)
|
||||
for(j in seq_len(P)) {
|
||||
gt_intr[,j,j] = intr_diag[,j]
|
||||
}
|
||||
# These should be relatively close:
|
||||
expect_lt(max(abs(intr - gt_intr)), 0.1)
|
||||
})
|
||||
|
||||
|
||||
test_that("multiclass feature interactions work", {
|
||||
dm <- xgb.DMatrix(as.matrix(iris[,-5]), label=as.numeric(iris$Species)-1)
|
||||
param <- list(eta=0.1, max_depth=4, objective='multi:softprob', num_class=3)
|
||||
b <- xgb.train(param, dm, 40)
|
||||
pred = predict(b, dm, outputmargin=TRUE) %>% array(c(3, 150)) %>% t
|
||||
|
||||
# SHAP contributions:
|
||||
cont <- predict(b, dm, predcontrib=TRUE)
|
||||
expect_length(cont, 3)
|
||||
# rewrap them as a 3d array
|
||||
cont <- unlist(cont) %>% array(c(150, 5, 3))
|
||||
# make sure for each row they add up to marginal predictions
|
||||
max(abs(apply(cont, c(1,3), sum) - pred)) %>% expect_lt(0.001)
|
||||
|
||||
# SHAP interaction contributions:
|
||||
intr <- predict(b, dm, predinteraction=TRUE)
|
||||
expect_length(intr, 3)
|
||||
# rewrap them as a 4d array
|
||||
intr <- unlist(intr) %>% array(c(150, 5, 5, 3)) %>% aperm(c(4, 1, 2, 3)) # [grp, row, col, col]
|
||||
# check the symmetry
|
||||
max(abs(aperm(intr, c(1,2,4,3)) - intr)) %>% expect_lt(0.00001)
|
||||
# sums WRT columns must be close to feature contributions
|
||||
max(abs(apply(intr, c(1,2,3), sum) - aperm(cont, c(3,1,2)))) %>% expect_lt(0.00001)
|
||||
})
|
||||
@@ -1,8 +1,8 @@
|
||||
set(ASan_LIB_NAME ASan)
|
||||
|
||||
find_library(ASan_LIBRARY
|
||||
NAMES libasan.so libasan.so.4
|
||||
PATHS /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib)
|
||||
NAMES libasan.so libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0
|
||||
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(ASan DEFAULT_MSG
|
||||
|
||||
@@ -2,7 +2,7 @@ set(LSan_LIB_NAME lsan)
|
||||
|
||||
find_library(LSan_LIBRARY
|
||||
NAMES liblsan.so liblsan.so.0 liblsan.so.0.0.0
|
||||
PATHS /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib)
|
||||
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(LSan DEFAULT_MSG
|
||||
|
||||
@@ -2,7 +2,7 @@ set(TSan_LIB_NAME tsan)
|
||||
|
||||
find_library(TSan_LIBRARY
|
||||
NAMES libtsan.so libtsan.so.0 libtsan.so.0.0.0
|
||||
PATHS /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib)
|
||||
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(TSan DEFAULT_MSG
|
||||
|
||||
@@ -18,7 +18,7 @@ def loadfmap( fname ):
|
||||
if it.strip() == '':
|
||||
continue
|
||||
k , v = it.split('=')
|
||||
fmap[ idx ][ v ] = len(nmap) + 1
|
||||
fmap[ idx ][ v ] = len(nmap)
|
||||
nmap[ len(nmap) ] = ftype+'='+k
|
||||
return fmap, nmap
|
||||
|
||||
|
||||
@@ -33,9 +33,9 @@ def logregobj(preds, dtrain):
|
||||
# Take this in mind when you use the customization, and maybe you need write customized evaluation function
|
||||
def evalerror(preds, dtrain):
|
||||
labels = dtrain.get_label()
|
||||
# return a pair metric_name, result. The metric name must not contain a colon (:)
|
||||
# return a pair metric_name, result. The metric name must not contain a colon (:) or a space
|
||||
# since preds are margin(before logistic transformation, cutoff at 0)
|
||||
return 'error', float(sum(labels != (preds > 0.0))) / len(labels)
|
||||
return 'my-error', float(sum(labels != (preds > 0.0))) / len(labels)
|
||||
|
||||
# training with customized objective, we can also do step by step training
|
||||
# simply look at xgboost.py's implementation of train
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
export PYTHONPATH=PYTHONPATH:../../python-package
|
||||
export PYTHONPATH=$PYTHONPATH:../../python-package
|
||||
python basic_walkthrough.py
|
||||
python custom_objective.py
|
||||
python boost_from_prediction.py
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
Demonstrating how to use XGBoost accomplish Multi-Class classification task on [UCI Dermatology dataset](https://archive.ics.uci.edu/ml/datasets/Dermatology)
|
||||
|
||||
Make sure you make make xgboost python module in ../../python
|
||||
Make sure you make xgboost python module in ../../python
|
||||
|
||||
1. Run runexp.sh
|
||||
```bash
|
||||
./runexp.sh
|
||||
```
|
||||
|
||||
|
||||
**R version** please see the `train.R`.
|
||||
|
||||
64
demo/multiclass_classification/train.R
Normal file
64
demo/multiclass_classification/train.R
Normal file
@@ -0,0 +1,64 @@
|
||||
library(data.table)
|
||||
library(xgboost)
|
||||
|
||||
if (!file.exists("./dermatology.data")) {
|
||||
download.file(
|
||||
"https://archive.ics.uci.edu/ml/machine-learning-databases/dermatology/dermatology.data",
|
||||
"dermatology.data",
|
||||
method = "curl"
|
||||
)
|
||||
}
|
||||
|
||||
df <- fread("dermatology.data", sep = ",", header = FALSE)
|
||||
|
||||
df[, `:=`(V34 = as.integer(ifelse(V34 == "?", 0L, V34)),
|
||||
V35 = V35 - 1L)]
|
||||
|
||||
idx <- sample(nrow(df), size = round(0.7 * nrow(df)), replace = FALSE)
|
||||
|
||||
train <- df[idx,]
|
||||
test <- df[-idx,]
|
||||
|
||||
train_x <- train[, 1:34]
|
||||
train_y <- train[, V35]
|
||||
|
||||
test_x <- test[, 1:34]
|
||||
test_y <- test[, V35]
|
||||
|
||||
xg_train <- xgb.DMatrix(data = as.matrix(train_x), label = train_y)
|
||||
xg_test = xgb.DMatrix(as.matrix(test_x), label = test_y)
|
||||
|
||||
params <- list(
|
||||
objective = 'multi:softmax',
|
||||
num_class = 6,
|
||||
max_depth = 6,
|
||||
nthread = 4,
|
||||
eta = 0.1
|
||||
)
|
||||
|
||||
watchlist = list(train = xg_train, test = xg_test)
|
||||
|
||||
bst <- xgb.train(
|
||||
params = params,
|
||||
data = xg_train,
|
||||
watchlist = watchlist,
|
||||
nrounds = 5
|
||||
)
|
||||
|
||||
pred <- predict(bst, xg_test)
|
||||
error_rate <- sum(pred != test_y) / length(test_y)
|
||||
print(paste("Test error using softmax =", error_rate))
|
||||
|
||||
# do the same thing again, but output probabilities
|
||||
params$objective <- 'multi:softprob'
|
||||
bst <- xgb.train(params, xg_train, nrounds = 5, watchlist)
|
||||
|
||||
pred_prob <- predict(bst, xg_test)
|
||||
|
||||
pred_mat <- matrix(pred_prob, ncol = 6, byrow = TRUE)
|
||||
# validation
|
||||
# rowSums(pred_mat)
|
||||
|
||||
pred_label <- apply(pred_mat, 1, which.max) - 1L
|
||||
error_rate = sum(pred_label != test_y) / length(test_y)
|
||||
print(paste("Test error using softprob =", error_rate))
|
||||
@@ -1,6 +1,6 @@
|
||||
Learning to rank
|
||||
====
|
||||
XGBoost supports accomplishing ranking tasks. In ranking scenario, data are often grouped and we need the [group information file](../../doc/input_format.md#group-input-format) to specify ranking tasks. The model used in XGBoost for ranking is the LambdaRank, this function is not yet completed. Currently, we provide pairwise rank.
|
||||
XGBoost supports accomplishing ranking tasks. In ranking scenario, data are often grouped and we need the [group information file](../../doc/tutorials/input_format.rst#group-input-format) to specify ranking tasks. The model used in XGBoost for ranking is the LambdaRank, this function is not yet completed. Currently, we provide pairwise rank.
|
||||
|
||||
### Parameters
|
||||
The configuration setting is similar to the regression and binary classification setting, except user need to specify the objectives:
|
||||
@@ -15,14 +15,27 @@ For more usage details please refer to the [binary classification demo](../binar
|
||||
Instructions
|
||||
====
|
||||
The dataset for ranking demo is from LETOR04 MQ2008 fold1.
|
||||
You can use the following command to run the example:
|
||||
Before running the examples, you need to get the data by running:
|
||||
|
||||
Get the data:
|
||||
```
|
||||
./wgetdata.sh
|
||||
```
|
||||
|
||||
### Command Line
|
||||
Run the example:
|
||||
```
|
||||
./runexp.sh
|
||||
```
|
||||
|
||||
### Python
|
||||
There are two ways of doing ranking in python.
|
||||
|
||||
Run the example using `xgboost.train`:
|
||||
```
|
||||
python rank.py
|
||||
```
|
||||
|
||||
Run the example using `XGBRanker`:
|
||||
```
|
||||
python rank_sklearn.py
|
||||
```
|
||||
|
||||
41
demo/rank/rank.py
Normal file
41
demo/rank/rank.py
Normal file
@@ -0,0 +1,41 @@
|
||||
#!/usr/bin/python
|
||||
import xgboost as xgb
|
||||
from xgboost import DMatrix
|
||||
from sklearn.datasets import load_svmlight_file
|
||||
|
||||
|
||||
# This script demonstrate how to do ranking with xgboost.train
|
||||
x_train, y_train = load_svmlight_file("mq2008.train")
|
||||
x_valid, y_valid = load_svmlight_file("mq2008.vali")
|
||||
x_test, y_test = load_svmlight_file("mq2008.test")
|
||||
|
||||
group_train = []
|
||||
with open("mq2008.train.group", "r") as f:
|
||||
data = f.readlines()
|
||||
for line in data:
|
||||
group_train.append(int(line.split("\n")[0]))
|
||||
|
||||
group_valid = []
|
||||
with open("mq2008.vali.group", "r") as f:
|
||||
data = f.readlines()
|
||||
for line in data:
|
||||
group_valid.append(int(line.split("\n")[0]))
|
||||
|
||||
group_test = []
|
||||
with open("mq2008.test.group", "r") as f:
|
||||
data = f.readlines()
|
||||
for line in data:
|
||||
group_test.append(int(line.split("\n")[0]))
|
||||
|
||||
train_dmatrix = DMatrix(x_train, y_train)
|
||||
valid_dmatrix = DMatrix(x_valid, y_valid)
|
||||
test_dmatrix = DMatrix(x_test)
|
||||
|
||||
train_dmatrix.set_group(group_train)
|
||||
valid_dmatrix.set_group(group_valid)
|
||||
|
||||
params = {'objective': 'rank:pairwise', 'eta': 0.1, 'gamma': 1.0,
|
||||
'min_child_weight': 0.1, 'max_depth': 6}
|
||||
xgb_model = xgb.train(params, train_dmatrix, num_boost_round=4,
|
||||
evals=[(valid_dmatrix, 'validation')])
|
||||
pred = xgb_model.predict(test_dmatrix)
|
||||
35
demo/rank/rank_sklearn.py
Normal file
35
demo/rank/rank_sklearn.py
Normal file
@@ -0,0 +1,35 @@
|
||||
#!/usr/bin/python
|
||||
import xgboost as xgb
|
||||
from sklearn.datasets import load_svmlight_file
|
||||
|
||||
|
||||
# This script demonstrate how to do ranking with XGBRanker
|
||||
x_train, y_train = load_svmlight_file("mq2008.train")
|
||||
x_valid, y_valid = load_svmlight_file("mq2008.vali")
|
||||
x_test, y_test = load_svmlight_file("mq2008.test")
|
||||
|
||||
group_train = []
|
||||
with open("mq2008.train.group", "r") as f:
|
||||
data = f.readlines()
|
||||
for line in data:
|
||||
group_train.append(int(line.split("\n")[0]))
|
||||
|
||||
group_valid = []
|
||||
with open("mq2008.vali.group", "r") as f:
|
||||
data = f.readlines()
|
||||
for line in data:
|
||||
group_valid.append(int(line.split("\n")[0]))
|
||||
|
||||
group_test = []
|
||||
with open("mq2008.test.group", "r") as f:
|
||||
data = f.readlines()
|
||||
for line in data:
|
||||
group_test.append(int(line.split("\n")[0]))
|
||||
|
||||
params = {'objective': 'rank:pairwise', 'learning_rate': 0.1,
|
||||
'gamma': 1.0, 'min_child_weight': 0.1,
|
||||
'max_depth': 6, 'n_estimators': 4}
|
||||
model = xgb.sklearn.XGBRanker(**params)
|
||||
model.fit(x_train, y_train, group_train,
|
||||
eval_set=[(x_valid, y_valid)], eval_group=[group_valid])
|
||||
pred = model.predict(x_test)
|
||||
@@ -1,11 +1,5 @@
|
||||
python trans_data.py train.txt mq2008.train mq2008.train.group
|
||||
|
||||
python trans_data.py test.txt mq2008.test mq2008.test.group
|
||||
|
||||
python trans_data.py vali.txt mq2008.vali mq2008.vali.group
|
||||
#!/bin/bash
|
||||
|
||||
../../xgboost mq2008.conf
|
||||
|
||||
../../xgboost mq2008.conf task=pred model_in=0004.model
|
||||
|
||||
|
||||
|
||||
@@ -2,3 +2,9 @@
|
||||
wget https://s3-us-west-2.amazonaws.com/xgboost-examples/MQ2008.rar
|
||||
unrar x MQ2008.rar
|
||||
mv -f MQ2008/Fold1/*.txt .
|
||||
|
||||
python trans_data.py train.txt mq2008.train mq2008.train.group
|
||||
|
||||
python trans_data.py test.txt mq2008.test mq2008.test.group
|
||||
|
||||
python trans_data.py vali.txt mq2008.vali mq2008.vali.group
|
||||
|
||||
Submodule dmlc-core updated: f2afdc7788...4d49691f1a
120
doc/build.rst
120
doc/build.rst
@@ -90,11 +90,11 @@ Building on OSX
|
||||
Install with pip: simple method
|
||||
--------------------------------
|
||||
|
||||
First, make sure you obtained ``gcc-5`` (newer version does not work with this method yet). Note: installation of ``gcc`` can take a while (~ 30 minutes).
|
||||
First, obtain ``gcc-7`` with Homebrew (https://brew.sh/) to enable multi-threading (i.e. using multiple CPU threads for training). The default Apple Clang compiler does not support OpenMP, so using the default compiler would have disabled multi-threading.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
brew install gcc@5
|
||||
brew install gcc@7
|
||||
|
||||
Then install XGBoost with ``pip``:
|
||||
|
||||
@@ -102,42 +102,30 @@ Then install XGBoost with ``pip``:
|
||||
|
||||
pip3 install xgboost
|
||||
|
||||
You might need to run the command with ``sudo`` if you run into permission errors.
|
||||
You might need to run the command with ``--user`` flag if you run into permission errors.
|
||||
|
||||
Build from the source code - advanced method
|
||||
--------------------------------------------
|
||||
|
||||
First, obtain ``gcc-7`` with homebrew (https://brew.sh/) if you want multi-threaded version. Clang is okay if multithreading is not required. Note: installation of ``gcc`` can take a while (~ 30 minutes).
|
||||
Obtain ``gcc-7`` from Homebrew:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
brew install gcc@7
|
||||
|
||||
Now, clone the repository:
|
||||
Now clone the repository:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git clone --recursive https://github.com/dmlc/xgboost
|
||||
cd xgboost; cp make/config.mk ./config.mk
|
||||
|
||||
Open ``config.mk`` and uncomment these two lines:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
export CC = gcc
|
||||
export CXX = g++
|
||||
|
||||
and replace these two lines as follows: (specify the GCC version)
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
export CC = gcc-7
|
||||
export CXX = g++-7
|
||||
|
||||
Now, you may build XGBoost using the following command:
|
||||
Create the ``build/`` directory and invoke CMake. Make sure to add ``CC=gcc-7 CXX=g++-7`` so that Homebrew GCC is selected. After invoking CMake, you can build XGBoost with ``make``:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
mkdir build
|
||||
cd build
|
||||
CC=gcc-7 CXX=g++-7 cmake ..
|
||||
make -j4
|
||||
|
||||
You may now continue to `Python Package Installation`_.
|
||||
@@ -173,6 +161,8 @@ To build with MinGW, type:
|
||||
|
||||
cp make/mingw64.mk config.mk; make -j4
|
||||
|
||||
See :ref:`mingw_python` for buildilng XGBoost for Python.
|
||||
|
||||
Compile XGBoost with Microsoft Visual Studio
|
||||
--------------------------------------------
|
||||
To build with Visual Studio, we will need CMake. Make sure to install a recent version of CMake. Then run the following from the root of the XGBoost directory:
|
||||
@@ -204,7 +194,7 @@ From the command line on Linux starting from the XGBoost directory:
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -DUSE_CUDA=ON
|
||||
make -j
|
||||
make -j4
|
||||
|
||||
.. note:: Enabling multi-GPU training
|
||||
|
||||
@@ -214,8 +204,8 @@ From the command line on Linux starting from the XGBoost directory:
|
||||
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON
|
||||
make -j
|
||||
cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DNCCL_ROOT=/path/to/nccl2
|
||||
make -j4
|
||||
|
||||
On Windows, see what options for generators you have for CMake, and choose one with ``[arch]`` replaced with Win64:
|
||||
|
||||
@@ -258,10 +248,12 @@ The configuration file ``config.mk`` modifies several compilation flags:
|
||||
|
||||
To customize, first copy ``make/config.mk`` to the project root and then modify the copy.
|
||||
|
||||
Alternatively, use CMake.
|
||||
|
||||
Python Package Installation
|
||||
===========================
|
||||
|
||||
The python package is located at ``python-package/``.
|
||||
The Python package is located at ``python-package/``.
|
||||
There are several ways to install the package:
|
||||
|
||||
1. Install system-wide, which requires root permission:
|
||||
@@ -271,7 +263,7 @@ There are several ways to install the package:
|
||||
cd python-package; sudo python setup.py install
|
||||
|
||||
You will however need Python ``distutils`` module for this to
|
||||
work. It is often part of the core python package or it can be installed using your
|
||||
work. It is often part of the core Python package or it can be installed using your
|
||||
package manager, e.g. in Debian use
|
||||
|
||||
.. code-block:: bash
|
||||
@@ -282,7 +274,7 @@ package manager, e.g. in Debian use
|
||||
|
||||
If you recompiled XGBoost, then you need to reinstall it again to make the new library take effect.
|
||||
|
||||
2. Only set the environment variable ``PYTHONPATH`` to tell python where to find
|
||||
2. Only set the environment variable ``PYTHONPATH`` to tell Python where to find
|
||||
the library. For example, assume we cloned `xgboost` on the home directory
|
||||
`~`. then we can added the following line in `~/.bashrc`.
|
||||
This option is **recommended for developers** who change the code frequently. The changes will be immediately reflected once you pulled the code and rebuild the project (no need to call ``setup`` again)
|
||||
@@ -304,6 +296,25 @@ package manager, e.g. in Debian use
|
||||
import os
|
||||
os.environ['PATH'] = os.environ['PATH'] + ';C:\\Program Files\\mingw-w64\\x86_64-5.3.0-posix-seh-rt_v4-rev0\\mingw64\\bin'
|
||||
|
||||
.. _mingw_python:
|
||||
|
||||
Building XGBoost library for Python for Windows with MinGW-w64
|
||||
--------------------------------------------------------------
|
||||
|
||||
Windows versions of Python are built with Microsoft Visual Studio. Usually Python binary modules are built with the same compiler the interpreter is built with, raising several potential concerns.
|
||||
|
||||
1. VS is proprietary and commercial software. Microsoft provides a freeware "Community" edition, but its licensing terms are unsuitable for many organizations.
|
||||
2. Visual Studio contains telemetry, as documented in `Microsoft Visual Studio Licensing Terms <https://visualstudio.microsoft.com/license-terms/mt736442/>`_. It `has been inserting telemetry <https://old.reddit.com/r/cpp/comments/4ibauu/visual_studio_adding_telemetry_function_calls_to/>`_ into apps for some time. In order to download VS distribution from MS servers one has to run the application containing telemetry. These facts have raised privacy and security concerns among some users and system administrators. Running software with telemetry may be against the policy of your organization.
|
||||
3. g++ usually generates faster code on ``-O3``.
|
||||
|
||||
So you may want to build XGBoost with g++ own your own risk. This opens a can of worms, because MSVC uses Microsoft runtime and MinGW-w64 uses own runtime, and the runtimes have different incompatible memory allocators. But in fact this setup is usable if you know how to deal with it. Here is some experience.
|
||||
|
||||
1. The Python interpreter will crash on exit if XGBoost was used. This is usually not a big issue.
|
||||
2. ``-O3`` is OK.
|
||||
3. ``-mtune=native`` is also OK.
|
||||
4. Don't use ``-march=native`` gcc flag. Using it causes the Python interpreter to crash if the dll was actually used.
|
||||
5. You may need to provide the lib with the runtime libs. If ``mingw32/bin`` is not in ``PATH``, build a wheel (``python setup.py bdist_wheel``), open it with an archiver and put the needed dlls to the directory where ``xgboost.dll`` is situated. Then you can install the wheel with ``pip``.
|
||||
|
||||
R Package Installation
|
||||
======================
|
||||
|
||||
@@ -316,35 +327,13 @@ You can install xgboost from CRAN just like any other R package:
|
||||
|
||||
install.packages("xgboost")
|
||||
|
||||
Or you can install it from our weekly updated drat repo:
|
||||
|
||||
.. code-block:: R
|
||||
|
||||
install.packages("drat", repos="https://cran.rstudio.com")
|
||||
drat:::addRepo("dmlc")
|
||||
install.packages("xgboost", repos="http://dmlc.ml/drat/", type = "source")
|
||||
|
||||
For OSX users, single threaded version will be installed. To install multi-threaded version,
|
||||
first follow `Building on OSX`_ to get the OpenMP enabled compiler. Then
|
||||
|
||||
- Set the ``Makevars`` file in highest piority for R.
|
||||
|
||||
The point is, there are three ``Makevars`` : ``~/.R/Makevars``, ``xgboost/R-package/src/Makevars``, and ``/usr/local/Cellar/r/3.2.0/R.framework/Resources/etc/Makeconf`` (the last one obtained by running ``file.path(R.home("etc"), "Makeconf")`` in R), and ``SHLIB_OPENMP_CXXFLAGS`` is not set by default!! After trying, it seems that the first one has highest piority (surprise!).
|
||||
|
||||
Then inside R, run
|
||||
|
||||
.. code-block:: R
|
||||
|
||||
install.packages("drat", repos="https://cran.rstudio.com")
|
||||
drat:::addRepo("dmlc")
|
||||
install.packages("xgboost", repos="http://dmlc.ml/drat/", type = "source")
|
||||
For OSX users, single-threaded version will be installed. So only one thread will be used for training. To enable use of multiple threads (and utilize capacity of multi-core CPUs), see the section :ref:`osx_multithread` to install XGBoost from source.
|
||||
|
||||
Installing the development version
|
||||
----------------------------------
|
||||
|
||||
Make sure you have installed git and a recent C++ compiler supporting C++11 (e.g., g++-4.8 or higher).
|
||||
On Windows, Rtools must be installed, and its bin directory has to be added to PATH during the installation.
|
||||
And see the previous subsection for an OSX tip.
|
||||
On Windows, Rtools must be installed, and its bin directory has to be added to ``PATH`` during the installation.
|
||||
|
||||
Due to the use of git-submodules, ``devtools::install_github`` can no longer be used to install the latest version of R package.
|
||||
Thus, one has to run git to check out the code first:
|
||||
@@ -370,6 +359,33 @@ The package could also be built and installed with cmake (and Visual C++ 2015 on
|
||||
|
||||
If all fails, try `Building the shared library`_ to see whether a problem is specific to R package or not.
|
||||
|
||||
.. _osx_multithread:
|
||||
|
||||
Installing R package on Mac OSX with multi-threading
|
||||
----------------------------------------------------
|
||||
|
||||
First, obtain ``gcc-7`` with Homebrew (https://brew.sh/) to enable multi-threading (i.e. using multiple CPU threads for training). The default Apple Clang compiler does not support OpenMP, so using the default compiler would have disabled multi-threading.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
brew install gcc@7
|
||||
|
||||
Now, clone the repository:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git clone --recursive https://github.com/dmlc/xgboost
|
||||
|
||||
Create the ``build/`` directory and invoke CMake with option ``R_LIB=ON``. Make sure to add ``CC=gcc-7 CXX=g++-7`` so that Homebrew GCC is selected. After invoking CMake, you can install the R package by running ``make`` and ``make install``:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
mkdir build
|
||||
cd build
|
||||
CC=gcc-7 CXX=g++-7 cmake .. -DR_LIB=ON
|
||||
make -j4
|
||||
make install
|
||||
|
||||
Installing R package with GPU support
|
||||
-------------------------------------
|
||||
|
||||
@@ -387,7 +403,7 @@ On Linux, starting from the XGBoost directory type:
|
||||
When default target is used, an R package shared library would be built in the ``build`` area.
|
||||
The ``install`` target, in addition, assembles the package files with this shared library under ``build/R-package``, and runs ``R CMD INSTALL``.
|
||||
|
||||
On Windows, cmake with Visual C++ Build Tools (or Visual Studio) has to be used to build an R package with GPU support. Rtools must also be installed (perhaps, some other MinGW distributions with ``gendef.exe`` and ``dlltool.exe`` would work, but that was not tested).
|
||||
On Windows, CMake with Visual C++ Build Tools (or Visual Studio) has to be used to build an R package with GPU support. Rtools must also be installed (perhaps, some other MinGW distributions with ``gendef.exe`` and ``dlltool.exe`` would work, but that was not tested).
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
|
||||
10
doc/conf.py
10
doc/conf.py
@@ -41,7 +41,7 @@ sys.path.insert(0, curr_path)
|
||||
|
||||
# -- mock out modules
|
||||
import mock
|
||||
MOCK_MODULES = ['numpy', 'scipy', 'scipy.sparse', 'sklearn', 'matplotlib', 'pandas', 'graphviz']
|
||||
MOCK_MODULES = ['scipy', 'scipy.sparse', 'sklearn', 'pandas']
|
||||
for mod_name in MOCK_MODULES:
|
||||
sys.modules[mod_name] = mock.Mock()
|
||||
|
||||
@@ -62,6 +62,7 @@ release = xgboost.__version__
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
|
||||
extensions = [
|
||||
'matplotlib.sphinxext.plot_directive',
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.napoleon',
|
||||
'sphinx.ext.mathjax',
|
||||
@@ -69,6 +70,11 @@ extensions = [
|
||||
'breathe'
|
||||
]
|
||||
|
||||
graphviz_output_format = 'png'
|
||||
plot_formats = [('svg', 300), ('png', 100), ('hires.png', 300)]
|
||||
plot_html_show_source_link = False
|
||||
plot_html_show_formats = False
|
||||
|
||||
# Breathe extension variables
|
||||
breathe_projects = {"xgboost": "doxyxml/"}
|
||||
breathe_default_project = "xgboost"
|
||||
@@ -150,7 +156,7 @@ extensions.append("guzzle_sphinx_theme")
|
||||
# Guzzle theme options (see theme.conf for more information)
|
||||
html_theme_options = {
|
||||
# Set the name of the project to appear in the sidebar
|
||||
"project_nav_name": "XGBoost (0.80)"
|
||||
"project_nav_name": "XGBoost"
|
||||
}
|
||||
|
||||
html_sidebars = {
|
||||
|
||||
@@ -149,6 +149,14 @@ sanitizer is not compatible with the other two sanitizers.
|
||||
|
||||
cmake -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak" /path/to/xgboost
|
||||
|
||||
By default, CMake will search regular system paths for sanitizers, you can also
|
||||
supply a specified SANITIZER_PATH.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
cmake -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak" \
|
||||
-DSANITIZER_PATH=/path/to/sanitizers /path/to/xgboost
|
||||
|
||||
How to use sanitizers with CUDA support
|
||||
=======================================
|
||||
Runing XGBoost on CUDA with address sanitizer (asan) will raise memory error.
|
||||
|
||||
@@ -58,9 +58,11 @@ For sbt, please add the repository and dependency in build.sbt as following:
|
||||
|
||||
If you want to use XGBoost4J-Spark, replace ``xgboost4j`` with ``xgboost4j-spark``.
|
||||
|
||||
.. note:: XGBoost4J-Spark requires Spark 2.3+
|
||||
.. note:: XGBoost4J-Spark requires Apache Spark 2.3+
|
||||
|
||||
XGBoost4J-Spark now requires Spark 2.3+. Latest versions of XGBoost4J-Spark uses facilities of `org.apache.spark.ml.param.shared` extensively to provide for a tight integration with Spark MLLIB framework, and these facilities are not fully available on earlier versions of Spark.
|
||||
XGBoost4J-Spark now requires **Apache Spark 2.3+**. Latest versions of XGBoost4J-Spark uses facilities of `org.apache.spark.ml.param.shared` extensively to provide for a tight integration with Spark MLLIB framework, and these facilities are not fully available on earlier versions of Spark.
|
||||
|
||||
Also, make sure to install Spark directly from `Apache website <https://spark.apache.org/>`_. **Upstream XGBoost is not guaranteed to work with third-party distributions of Spark, such as Cloudera Spark.** Consult appropriate third parties to obtain their distribution of XGBoost.
|
||||
|
||||
Installation from maven repo
|
||||
============================
|
||||
|
||||
@@ -61,9 +61,17 @@ and then refer to the snapshot dependency by adding:
|
||||
<version>next_version_num-SNAPSHOT</version>
|
||||
</dependency>
|
||||
|
||||
.. note:: XGBoost4J-Spark requires Spark 2.3+
|
||||
.. note:: XGBoost4J-Spark requires Apache Spark 2.3+
|
||||
|
||||
XGBoost4J-Spark now requires Spark 2.3+. Latest versions of XGBoost4J-Spark uses facilities of `org.apache.spark.ml.param.shared` extensively to provide for a tight integration with Spark MLLIB framework, and these facilities are not fully available on earlier versions of Spark.
|
||||
XGBoost4J-Spark now requires **Apache Spark 2.3+**. Latest versions of XGBoost4J-Spark uses facilities of `org.apache.spark.ml.param.shared` extensively to provide for a tight integration with Spark MLLIB framework, and these facilities are not fully available on earlier versions of Spark.
|
||||
|
||||
Also, make sure to install Spark directly from `Apache website <https://spark.apache.org/>`_. **Upstream XGBoost is not guaranteed to work with third-party distributions of Spark, such as Cloudera Spark.** Consult appropriate third parties to obtain their distribution of XGBoost.
|
||||
|
||||
Installation from maven repo
|
||||
|
||||
.. note:: Use of Python in XGBoost4J-Spark
|
||||
|
||||
By default, we use the tracker in `dmlc-core <https://github.com/dmlc/dmlc-core/tree/master/tracker>`_ to drive the training with XGBoost4J-Spark. It requires Python 2.7+. We also have an experimental Scala version of tracker which can be enabled by passing the parameter ``tracker_conf`` as ``scala``.
|
||||
|
||||
Data Preparation
|
||||
================
|
||||
@@ -183,6 +191,15 @@ After we set XGBoostClassifier parameters and feature/label column, we can build
|
||||
|
||||
val xgbClassificationModel = xgbClassifier.fit(xgbInput)
|
||||
|
||||
Early Stopping
|
||||
----------------
|
||||
|
||||
Early stopping is a feature to prevent the unnecessary training iterations. By specifying ``num_early_stopping_rounds`` or directly call ``setNumEarlyStoppingRounds`` over a XGBoostClassifier or XGBoostRegressor, we can define number of rounds for the evaluation metric going to the unexpected direction to tolerate before stopping the training.
|
||||
|
||||
In additional to ``num_early_stopping_rounds``, you also need to define ``maximize_evaluation_metrics`` or call ``setMaximizeEvaluationMetrics`` to specify whether you want to maximize or minimize the metrics in training.
|
||||
|
||||
After specifying these two parameters, the training would stop when the metrics goes to the other direction against the one specified by ``maximize_evaluation_metrics`` for ``num_early_stopping_rounds`` iterations.
|
||||
|
||||
Prediction
|
||||
==========
|
||||
|
||||
|
||||
@@ -31,6 +31,10 @@ General Parameters
|
||||
|
||||
- Number of parallel threads used to run XGBoost
|
||||
|
||||
* ``disable_default_eval_metric`` [default=0]
|
||||
|
||||
- Flag to disable default metric. Set to >0 to disable.
|
||||
|
||||
* ``num_pbuffer`` [set automatically by XGBoost, no need to be set by user]
|
||||
|
||||
- Size of prediction buffer, normally set to number of training instances. The buffers are used to save the prediction results of last boosting step.
|
||||
@@ -78,7 +82,7 @@ Parameters for Tree Booster
|
||||
|
||||
* ``colsample_bylevel`` [default=1]
|
||||
|
||||
- Subsample ratio of columns for each split, in each level. Subsampling will occur each time a new split is made. This paramter has no effect when ``tree_method`` is set to ``hist``.
|
||||
- Subsample ratio of columns for each split, in each level. Subsampling will occur each time a new split is made.
|
||||
- range: (0,1]
|
||||
|
||||
* ``lambda`` [default=1, alias: ``reg_lambda``]
|
||||
@@ -152,7 +156,7 @@ Parameters for Tree Booster
|
||||
|
||||
- Controls a way new nodes are added to the tree.
|
||||
- Currently supported only if ``tree_method`` is set to ``hist``.
|
||||
- Choices: ``depthwise``, ```lossguide``
|
||||
- Choices: ``depthwise``, ``lossguide``
|
||||
|
||||
- ``depthwise``: split at nodes closest to the root.
|
||||
- ``lossguide``: split at nodes with highest loss change.
|
||||
@@ -244,6 +248,20 @@ Parameters for Linear Booster (``booster=gblinear``)
|
||||
- ``shotgun``: Parallel coordinate descent algorithm based on shotgun algorithm. Uses 'hogwild' parallelism and therefore produces a nondeterministic solution on each run.
|
||||
- ``coord_descent``: Ordinary coordinate descent algorithm. Also multithreaded but still produces a deterministic solution.
|
||||
|
||||
* ``feature_selector`` [default= ``cyclic``]
|
||||
|
||||
- Feature selection and ordering method
|
||||
|
||||
* ``cyclic``: Deterministic selection by cycling through features one at a time.
|
||||
* ``shuffle``: Similar to ``cyclic`` but with random feature shuffling prior to each update.
|
||||
* ``random``: A random (with replacement) coordinate selector.
|
||||
* ``greedy``: Select coordinate with the greatest gradient magnitude. It has ``O(num_feature^2)`` complexity. It is fully deterministic. It allows restricting the selection to ``top_k`` features per group with the largest magnitude of univariate weight change, by setting the ``top_k`` parameter. Doing so would reduce the complexity to ``O(num_feature*top_k)``.
|
||||
* ``thrifty``: Thrifty, approximately-greedy feature selector. Prior to cyclic updates, reorders features in descending magnitude of their univariate weight changes. This operation is multithreaded and is a linear complexity approximation of the quadratic greedy selection. It allows restricting the selection to ``top_k`` features per group with the largest magnitude of univariate weight change, by setting the ``top_k`` parameter.
|
||||
|
||||
* ``top_k`` [default=0]
|
||||
|
||||
- The number of top features to select in ``greedy`` and ``thrifty`` feature selector. The value of 0 means using all the features.
|
||||
|
||||
Parameters for Tweedie Regression (``objective=reg:tweedie``)
|
||||
=============================================================
|
||||
* ``tweedie_variance_power`` [default=1.5]
|
||||
@@ -276,7 +294,9 @@ Specify the learning task and the corresponding learning objective. The objectiv
|
||||
Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function ``h(t) = h0(t) * HR``).
|
||||
- ``multi:softmax``: set XGBoost to do multiclass classification using the softmax objective, you also need to set num_class(number of classes)
|
||||
- ``multi:softprob``: same as softmax, but output a vector of ``ndata * nclass``, which can be further reshaped to ``ndata * nclass`` matrix. The result contains predicted probability of each data point belonging to each class.
|
||||
- ``rank:pairwise``: set XGBoost to do ranking task by minimizing the pairwise loss
|
||||
- ``rank:pairwise``: Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized
|
||||
- ``rank:ndcg``: Use LambdaMART to perform list-wise ranking where `Normalized Discounted Cumulative Gain (NDCG) <http://en.wikipedia.org/wiki/NDCG>`_ is maximized
|
||||
- ``rank:map``: Use LambdaMART to perform list-wise ranking where `Mean Average Precision (MAP) <http://en.wikipedia.org/wiki/Mean_average_precision#Mean_average_precision>`_ is maximized
|
||||
- ``reg:gamma``: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be `gamma-distributed <https://en.wikipedia.org/wiki/Gamma_distribution#Applications>`_.
|
||||
- ``reg:tweedie``: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be `Tweedie-distributed <https://en.wikipedia.org/wiki/Tweedie_distribution#Applications>`_.
|
||||
|
||||
@@ -299,8 +319,9 @@ Specify the learning task and the corresponding learning objective. The objectiv
|
||||
- ``merror``: Multiclass classification error rate. It is calculated as ``#(wrong cases)/#(all cases)``.
|
||||
- ``mlogloss``: `Multiclass logloss <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html>`_.
|
||||
- ``auc``: `Area under the curve <http://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve>`_
|
||||
- ``aucpr``: `Area under the PR curve <https://en.wikipedia.org/wiki/Precision_and_recall>`_
|
||||
- ``ndcg``: `Normalized Discounted Cumulative Gain <http://en.wikipedia.org/wiki/NDCG>`_
|
||||
- ``map``: `Mean average precision <http://en.wikipedia.org/wiki/Mean_average_precision#Mean_average_precision>`_
|
||||
- ``map``: `Mean Average Precision <http://en.wikipedia.org/wiki/Mean_average_precision#Mean_average_precision>`_
|
||||
- ``ndcg@n``, ``map@n``: 'n' can be assigned as an integer to cut off the top positions in the lists for evaluation.
|
||||
- ``ndcg-``, ``map-``, ``ndcg@n-``, ``map@n-``: In XGBoost, NDCG and MAP will evaluate the score of a list without any positive samples as 1. By adding "-" in the evaluation metric XGBoost will evaluate these score as 0 to be consistent under some conditions.
|
||||
- ``poisson-nloglik``: negative log-likelihood for Poisson regression
|
||||
|
||||
@@ -39,6 +39,10 @@ Scikit-Learn API
|
||||
:members:
|
||||
:inherited-members:
|
||||
:show-inheritance:
|
||||
.. autoclass:: xgboost.XGBRanker
|
||||
:members:
|
||||
:inherited-members:
|
||||
:show-inheritance:
|
||||
|
||||
Plotting API
|
||||
------------
|
||||
@@ -49,3 +53,15 @@ Plotting API
|
||||
.. autofunction:: xgboost.plot_tree
|
||||
|
||||
.. autofunction:: xgboost.to_graphviz
|
||||
|
||||
.. _callback_api:
|
||||
|
||||
Callback API
|
||||
------------
|
||||
.. autofunction:: xgboost.callback.print_evaluation
|
||||
|
||||
.. autofunction:: xgboost.callback.record_evaluation
|
||||
|
||||
.. autofunction:: xgboost.callback.reset_learning_rate
|
||||
|
||||
.. autofunction:: xgboost.callback.early_stop
|
||||
|
||||
@@ -3,3 +3,6 @@ mock
|
||||
guzzle_sphinx_theme
|
||||
breathe
|
||||
sh>=1.12.14
|
||||
matplotlib>=2.1
|
||||
graphviz
|
||||
numpy
|
||||
|
||||
177
doc/tutorials/feature_interaction_constraint.rst
Normal file
177
doc/tutorials/feature_interaction_constraint.rst
Normal file
@@ -0,0 +1,177 @@
|
||||
###############################
|
||||
Feature Interaction Constraints
|
||||
###############################
|
||||
|
||||
The decision tree is a powerful tool to discover interaction among independent
|
||||
variables (features). Variables that appear together in a traversal path
|
||||
are interacting with one another, since the condition of a child node is
|
||||
predicated on the condition of the parent node. For example, the highlighted
|
||||
red path in the diagram below contains three variables: :math:`x_1`, :math:`x_7`,
|
||||
and :math:`x_{10}`, so the highlighted prediction (at the highlighted leaf node)
|
||||
is the product of interaction between :math:`x_1`, :math:`x_7`, and
|
||||
:math:`x_{10}`.
|
||||
|
||||
.. plot::
|
||||
:nofigs:
|
||||
|
||||
from graphviz import Source
|
||||
source = r"""
|
||||
digraph feature_interaction_illustration1 {
|
||||
graph [fontname = "helvetica"];
|
||||
node [fontname = "helvetica"];
|
||||
edge [fontname = "helvetica"];
|
||||
0 [label=<x<SUB><FONT POINT-SIZE="11">10</FONT></SUB> < -1.5 ?>, shape=box, color=red, fontcolor=red];
|
||||
1 [label=<x<SUB><FONT POINT-SIZE="11">2</FONT></SUB> < 2 ?>, shape=box];
|
||||
2 [label=<x<SUB><FONT POINT-SIZE="11">7</FONT></SUB> < 0.3 ?>, shape=box, color=red, fontcolor=red];
|
||||
3 [label="...", shape=none];
|
||||
4 [label="...", shape=none];
|
||||
5 [label=<x<SUB><FONT POINT-SIZE="11">1</FONT></SUB> < 0.5 ?>, shape=box, color=red, fontcolor=red];
|
||||
6 [label="...", shape=none];
|
||||
7 [label="...", shape=none];
|
||||
8 [label="Predict +1.3", color=red, fontcolor=red];
|
||||
0 -> 1 [labeldistance=2.0, labelangle=45, headlabel="Yes/Missing "];
|
||||
0 -> 2 [labeldistance=2.0, labelangle=-45,
|
||||
headlabel="No", color=red, fontcolor=red];
|
||||
1 -> 3 [labeldistance=2.0, labelangle=45, headlabel="Yes"];
|
||||
1 -> 4 [labeldistance=2.0, labelangle=-45, headlabel=" No/Missing"];
|
||||
2 -> 5 [labeldistance=2.0, labelangle=-45, headlabel="Yes",
|
||||
color=red, fontcolor=red];
|
||||
2 -> 6 [labeldistance=2.0, labelangle=-45, headlabel=" No/Missing"];
|
||||
5 -> 7;
|
||||
5 -> 8 [color=red];
|
||||
}
|
||||
"""
|
||||
Source(source, format='png').render('../_static/feature_interaction_illustration1', view=False)
|
||||
Source(source, format='svg').render('../_static/feature_interaction_illustration1', view=False)
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<p>
|
||||
<img src="../_static/feature_interaction_illustration1.svg"
|
||||
onerror="this.src='../_static/feature_interaction_illustration1.png'; this.onerror=null;">
|
||||
</p>
|
||||
|
||||
When the tree depth is larger than one, many variables interact on
|
||||
the sole basis of minimizing training loss, and the resulting decision tree may
|
||||
capture a spurious relationship (noise) rather than a legitimate relationship
|
||||
that generalizes across different datasets. **Feature interaction constraints**
|
||||
allow users to decide which variables are allowed to interact and which are not.
|
||||
|
||||
Potential benefits include:
|
||||
|
||||
* Better predictive performance from focusing on interactions that work --
|
||||
whether through domain specific knowledge or algorithms that rank interactions
|
||||
* Less noise in predictions; better generalization
|
||||
* More control to the user on what the model can fit. For example, the user may
|
||||
want to exclude some interactions even if they perform well due to regulatory
|
||||
constraints
|
||||
|
||||
****************
|
||||
A Simple Example
|
||||
****************
|
||||
|
||||
Feature interaction constraints are expressed in terms of groups of variables
|
||||
that are allowed to interact. For example, the constraint
|
||||
``[0, 1]`` indicates that variables :math:`x_0` and :math:`x_1` are allowed to
|
||||
interact with each other but with no other variable. Similarly, ``[2, 3, 4]``
|
||||
indicates that :math:`x_2`, :math:`x_3`, and :math:`x_4` are allowed to
|
||||
interact with one another but with no other variable. A set of feature
|
||||
interaction constraints is expressed as a nested list, e.g.
|
||||
``[[0, 1], [2, 3, 4]]``, where each inner list is a group of indices of features
|
||||
that are allowed to interact with each other.
|
||||
|
||||
In the following diagram, the left decision tree is in violation of the first
|
||||
constraint (``[0, 1]``), whereas the right decision tree complies with both the
|
||||
first and second constraints (``[0, 1]``, ``[2, 3, 4]``).
|
||||
|
||||
.. plot::
|
||||
:nofigs:
|
||||
|
||||
from graphviz import Source
|
||||
source = r"""
|
||||
digraph feature_interaction_illustration2 {
|
||||
graph [fontname = "helvetica"];
|
||||
node [fontname = "helvetica"];
|
||||
edge [fontname = "helvetica"];
|
||||
0 [label=<x<SUB><FONT POINT-SIZE="11">0</FONT></SUB> < 5.0 ?>, shape=box];
|
||||
1 [label=<x<SUB><FONT POINT-SIZE="11">2</FONT></SUB> < -3.0 ?>, shape=box];
|
||||
2 [label="+0.6"];
|
||||
3 [label="-0.4"];
|
||||
4 [label="+1.2"];
|
||||
0 -> 1 [labeldistance=2.0, labelangle=45, headlabel="Yes/Missing "];
|
||||
0 -> 2 [labeldistance=2.0, labelangle=-45, headlabel="No"];
|
||||
1 -> 3 [labeldistance=2.0, labelangle=45, headlabel="Yes"];
|
||||
1 -> 4 [labeldistance=2.0, labelangle=-45, headlabel=" No/Missing"];
|
||||
}
|
||||
"""
|
||||
Source(source, format='png').render('../_static/feature_interaction_illustration2', view=False)
|
||||
Source(source, format='svg').render('../_static/feature_interaction_illustration2', view=False)
|
||||
|
||||
.. plot::
|
||||
:nofigs:
|
||||
|
||||
from graphviz import Source
|
||||
source = r"""
|
||||
digraph feature_interaction_illustration3 {
|
||||
graph [fontname = "helvetica"];
|
||||
node [fontname = "helvetica"];
|
||||
edge [fontname = "helvetica"];
|
||||
0 [label=<x<SUB><FONT POINT-SIZE="11">3</FONT></SUB> < 2.5 ?>, shape=box];
|
||||
1 [label="+1.6"];
|
||||
2 [label=<x<SUB><FONT POINT-SIZE="11">2</FONT></SUB> < -1.2 ?>, shape=box];
|
||||
3 [label="+0.1"];
|
||||
4 [label="-0.3"];
|
||||
0 -> 1 [labeldistance=2.0, labelangle=45, headlabel="Yes"];
|
||||
0 -> 2 [labeldistance=2.0, labelangle=-45, headlabel=" No/Missing"];
|
||||
2 -> 3 [labeldistance=2.0, labelangle=45, headlabel="Yes/Missing "];
|
||||
2 -> 4 [labeldistance=2.0, labelangle=-45, headlabel="No"];
|
||||
}
|
||||
"""
|
||||
Source(source, format='png').render('../_static/feature_interaction_illustration3', view=False)
|
||||
Source(source, format='svg').render('../_static/feature_interaction_illustration3', view=False)
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<p>
|
||||
<img src="../_static/feature_interaction_illustration2.svg"
|
||||
onerror="this.src='../_static/feature_interaction_illustration2.png'; this.onerror=null;">
|
||||
<img src="../_static/feature_interaction_illustration3.svg"
|
||||
onerror="this.src='../_static/feature_interaction_illustration3.png'; this.onerror=null;">
|
||||
</p>
|
||||
|
||||
****************************************************
|
||||
Enforcing Feature Interaction Constraints in XGBoost
|
||||
****************************************************
|
||||
|
||||
It is very simple to enforce monotonicity constraints in XGBoost. Here we will
|
||||
give an example using Python, but the same general idea generalizes to other
|
||||
platforms.
|
||||
|
||||
Suppose the following code fits your model without monotonicity constraints:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
model_no_constraints = xgb.train(params, dtrain,
|
||||
num_boost_round = 1000, evals = evallist,
|
||||
early_stopping_rounds = 10)
|
||||
|
||||
Then fitting with monotonicity constraints only requires adding a single
|
||||
parameter:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
params_constrained = params.copy()
|
||||
# Use nested list to define feature interaction constraints
|
||||
params_constrained['interaction_constraints'] = '[[0, 2], [1, 3, 4], [5, 6]]'
|
||||
# Features 0 and 2 are allowed to interact with each other but with no other feature
|
||||
# Features 1, 3, 4 are allowed to interact with one another but with no other feature
|
||||
# Features 5 and 6 are allowed to interact with each other but with no other feature
|
||||
|
||||
model_with_constraints = xgb.train(params_constrained, dtrain,
|
||||
num_boost_round = 1000, evals = evallist,
|
||||
early_stopping_rounds = 10)
|
||||
|
||||
**Choice of tree construction algorithm**. To use feature interaction
|
||||
constraints, be sure to set the ``tree_method`` parameter to either ``exact``
|
||||
or ``hist``. Currently, GPU algorithms (``gpu_hist``, ``gpu_exact``) do not
|
||||
support feature interaction constraints.
|
||||
@@ -14,6 +14,7 @@ See `Awesome XGBoost <https://github.com/dmlc/xgboost/tree/master/demo>`_ for mo
|
||||
Distributed XGBoost with XGBoost4J-Spark <https://xgboost.readthedocs.io/en/latest/jvm/xgboost4j_spark_tutorial.html>
|
||||
dart
|
||||
monotonic
|
||||
feature_interaction_constraint
|
||||
input_format
|
||||
param_tuning
|
||||
external_memory
|
||||
|
||||
@@ -82,7 +82,7 @@ Some other examples:
|
||||
- ``(1,0)``: An increasing constraint on the first predictor and no constraint on the second.
|
||||
- ``(0,-1)``: No constraint on the first predictor and a decreasing constraint on the second.
|
||||
|
||||
**Choise of tree construction algorithm**. To use monotonic constraints, be
|
||||
**Choice of tree construction algorithm**. To use monotonic constraints, be
|
||||
sure to set the ``tree_method`` parameter to one of ``exact``, ``hist``, and
|
||||
``gpu_hist``.
|
||||
|
||||
|
||||
@@ -12,9 +12,14 @@
|
||||
#include <cstring>
|
||||
#include <memory>
|
||||
#include <numeric>
|
||||
#include <algorithm>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "./base.h"
|
||||
#include "../../src/common/span.h"
|
||||
#include "../../src/common/group_data.h"
|
||||
|
||||
#include "../../src/common/host_device_vector.h"
|
||||
|
||||
namespace xgboost {
|
||||
// forward declare learner.
|
||||
@@ -40,7 +45,7 @@ class MetaInfo {
|
||||
/*! \brief number of nonzero entries in the data */
|
||||
uint64_t num_nonzero_{0};
|
||||
/*! \brief label of each instance */
|
||||
std::vector<bst_float> labels_;
|
||||
HostDeviceVector<bst_float> labels_;
|
||||
/*!
|
||||
* \brief specified root index of each instance,
|
||||
* can be used for multi task setting
|
||||
@@ -52,7 +57,7 @@ class MetaInfo {
|
||||
*/
|
||||
std::vector<bst_uint> group_ptr_;
|
||||
/*! \brief weights of each instance, optional */
|
||||
std::vector<bst_float> weights_;
|
||||
HostDeviceVector<bst_float> weights_;
|
||||
/*! \brief session-id of each instance, optional */
|
||||
std::vector<uint64_t> qids_;
|
||||
/*!
|
||||
@@ -60,7 +65,7 @@ class MetaInfo {
|
||||
* if specified, xgboost will start from this init margin
|
||||
* can be used to specify initial prediction to boost from.
|
||||
*/
|
||||
std::vector<bst_float> base_margin_;
|
||||
HostDeviceVector<bst_float> base_margin_;
|
||||
/*! \brief version flag, used to check version of this info */
|
||||
static const int kVersion = 2;
|
||||
/*! \brief version that introduced qid field */
|
||||
@@ -73,7 +78,7 @@ class MetaInfo {
|
||||
* \return The weight.
|
||||
*/
|
||||
inline bst_float GetWeight(size_t i) const {
|
||||
return weights_.size() != 0 ? weights_[i] : 1.0f;
|
||||
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
|
||||
}
|
||||
/*!
|
||||
* \brief Get the root index of i-th instance.
|
||||
@@ -85,12 +90,12 @@ class MetaInfo {
|
||||
}
|
||||
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
|
||||
inline const std::vector<size_t>& LabelAbsSort() const {
|
||||
if (label_order_cache_.size() == labels_.size()) {
|
||||
if (label_order_cache_.size() == labels_.Size()) {
|
||||
return label_order_cache_;
|
||||
}
|
||||
label_order_cache_.resize(labels_.size());
|
||||
label_order_cache_.resize(labels_.Size());
|
||||
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
|
||||
const auto l = labels_;
|
||||
const auto& l = labels_.HostVector();
|
||||
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
|
||||
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
|
||||
|
||||
@@ -133,7 +138,7 @@ struct Entry {
|
||||
/*!
|
||||
* \brief constructor with index and value
|
||||
* \param index The feature or row index.
|
||||
* \param fvalue THe feature value.
|
||||
* \param fvalue The feature value.
|
||||
*/
|
||||
Entry(bst_uint index, bst_float fvalue) : index(index), fvalue(fvalue) {}
|
||||
/*! \brief reversely compare feature values */
|
||||
@@ -146,33 +151,26 @@ struct Entry {
|
||||
};
|
||||
|
||||
/*!
|
||||
* \brief in-memory storage unit of sparse batch
|
||||
* \brief In-memory storage unit of sparse batch, stored in CSR format.
|
||||
*/
|
||||
class SparsePage {
|
||||
public:
|
||||
std::vector<size_t> offset;
|
||||
// Offset for each row.
|
||||
HostDeviceVector<size_t> offset;
|
||||
/*! \brief the data of the segments */
|
||||
std::vector<Entry> data;
|
||||
HostDeviceVector<Entry> data;
|
||||
|
||||
size_t base_rowid;
|
||||
|
||||
/*! \brief an instance of sparse vector in the batch */
|
||||
struct Inst {
|
||||
/*! \brief pointer to the elements*/
|
||||
const Entry *data{nullptr};
|
||||
/*! \brief length of the instance */
|
||||
bst_uint length{0};
|
||||
/*! \brief constructor */
|
||||
Inst() = default;
|
||||
Inst(const Entry *data, bst_uint length) : data(data), length(length) {}
|
||||
/*! \brief get i-th pair in the sparse vector*/
|
||||
inline const Entry& operator[](size_t i) const {
|
||||
return data[i];
|
||||
}
|
||||
};
|
||||
using Inst = common::Span<Entry const>;
|
||||
|
||||
/*! \brief get i-th row from the batch */
|
||||
inline Inst operator[](size_t i) const {
|
||||
return {data.data() + offset[i], static_cast<bst_uint>(offset[i + 1] - offset[i])};
|
||||
const auto& data_vec = data.HostVector();
|
||||
const auto& offset_vec = offset.HostVector();
|
||||
return {data_vec.data() + offset_vec[i],
|
||||
static_cast<Inst::index_type>(offset_vec[i + 1] - offset_vec[i])};
|
||||
}
|
||||
|
||||
/*! \brief constructor */
|
||||
@@ -181,18 +179,62 @@ class SparsePage {
|
||||
}
|
||||
/*! \return number of instance in the page */
|
||||
inline size_t Size() const {
|
||||
return offset.size() - 1;
|
||||
return offset.Size() - 1;
|
||||
}
|
||||
/*! \return estimation of memory cost of this page */
|
||||
inline size_t MemCostBytes() const {
|
||||
return offset.size() * sizeof(size_t) + data.size() * sizeof(Entry);
|
||||
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
|
||||
}
|
||||
/*! \brief clear the page */
|
||||
inline void Clear() {
|
||||
base_rowid = 0;
|
||||
offset.clear();
|
||||
offset.push_back(0);
|
||||
data.clear();
|
||||
auto& offset_vec = offset.HostVector();
|
||||
offset_vec.clear();
|
||||
offset_vec.push_back(0);
|
||||
data.HostVector().clear();
|
||||
}
|
||||
|
||||
SparsePage GetTranspose(int num_columns) const {
|
||||
SparsePage transpose;
|
||||
common::ParallelGroupBuilder<Entry> builder(&transpose.offset.HostVector(),
|
||||
&transpose.data.HostVector());
|
||||
const int nthread = omp_get_max_threads();
|
||||
builder.InitBudget(num_columns, nthread);
|
||||
long batch_size = static_cast<long>(this->Size()); // NOLINT(*)
|
||||
#pragma omp parallel for schedule(static)
|
||||
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
|
||||
int tid = omp_get_thread_num();
|
||||
auto inst = (*this)[i];
|
||||
for (bst_uint j = 0; j < inst.size(); ++j) {
|
||||
builder.AddBudget(inst[j].index, tid);
|
||||
}
|
||||
}
|
||||
builder.InitStorage();
|
||||
#pragma omp parallel for schedule(static)
|
||||
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
|
||||
int tid = omp_get_thread_num();
|
||||
auto inst = (*this)[i];
|
||||
for (bst_uint j = 0; j < inst.size(); ++j) {
|
||||
builder.Push(
|
||||
inst[j].index,
|
||||
Entry(static_cast<bst_uint>(this->base_rowid + i), inst[j].fvalue),
|
||||
tid);
|
||||
}
|
||||
}
|
||||
return transpose;
|
||||
}
|
||||
|
||||
void SortRows() {
|
||||
auto ncol = static_cast<bst_omp_uint>(this->Size());
|
||||
#pragma omp parallel for schedule(dynamic, 1)
|
||||
for (bst_omp_uint i = 0; i < ncol; ++i) {
|
||||
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
|
||||
std::sort(
|
||||
this->data.HostVector().begin() + this->offset.HostVector()[i],
|
||||
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
|
||||
Entry::CmpValue);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*!
|
||||
@@ -200,33 +242,39 @@ class SparsePage {
|
||||
* \param batch the row batch.
|
||||
*/
|
||||
inline void Push(const dmlc::RowBlock<uint32_t>& batch) {
|
||||
data.reserve(data.size() + batch.offset[batch.size] - batch.offset[0]);
|
||||
offset.reserve(offset.size() + batch.size);
|
||||
auto& data_vec = data.HostVector();
|
||||
auto& offset_vec = offset.HostVector();
|
||||
data_vec.reserve(data.Size() + batch.offset[batch.size] - batch.offset[0]);
|
||||
offset_vec.reserve(offset.Size() + batch.size);
|
||||
CHECK(batch.index != nullptr);
|
||||
for (size_t i = 0; i < batch.size; ++i) {
|
||||
offset.push_back(offset.back() + batch.offset[i + 1] - batch.offset[i]);
|
||||
offset_vec.push_back(offset_vec.back() + batch.offset[i + 1] - batch.offset[i]);
|
||||
}
|
||||
for (size_t i = batch.offset[0]; i < batch.offset[batch.size]; ++i) {
|
||||
uint32_t index = batch.index[i];
|
||||
bst_float fvalue = batch.value == nullptr ? 1.0f : batch.value[i];
|
||||
data.emplace_back(index, fvalue);
|
||||
data_vec.emplace_back(index, fvalue);
|
||||
}
|
||||
CHECK_EQ(offset.back(), data.size());
|
||||
CHECK_EQ(offset_vec.back(), data.Size());
|
||||
}
|
||||
/*!
|
||||
* \brief Push a sparse page
|
||||
* \param batch the row page
|
||||
*/
|
||||
inline void Push(const SparsePage &batch) {
|
||||
size_t top = offset.back();
|
||||
data.resize(top + batch.data.size());
|
||||
std::memcpy(dmlc::BeginPtr(data) + top,
|
||||
dmlc::BeginPtr(batch.data),
|
||||
sizeof(Entry) * batch.data.size());
|
||||
size_t begin = offset.size();
|
||||
offset.resize(begin + batch.Size());
|
||||
auto& data_vec = data.HostVector();
|
||||
auto& offset_vec = offset.HostVector();
|
||||
const auto& batch_offset_vec = batch.offset.HostVector();
|
||||
const auto& batch_data_vec = batch.data.HostVector();
|
||||
size_t top = offset_vec.back();
|
||||
data_vec.resize(top + batch.data.Size());
|
||||
std::memcpy(dmlc::BeginPtr(data_vec) + top,
|
||||
dmlc::BeginPtr(batch_data_vec),
|
||||
sizeof(Entry) * batch.data.Size());
|
||||
size_t begin = offset.Size();
|
||||
offset_vec.resize(begin + batch.Size());
|
||||
for (size_t i = 0; i < batch.Size(); ++i) {
|
||||
offset[i + begin] = top + batch.offset[i + 1];
|
||||
offset_vec[i + begin] = top + batch_offset_vec[i + 1];
|
||||
}
|
||||
}
|
||||
/*!
|
||||
@@ -234,19 +282,76 @@ class SparsePage {
|
||||
* \param inst an instance row
|
||||
*/
|
||||
inline void Push(const Inst &inst) {
|
||||
offset.push_back(offset.back() + inst.length);
|
||||
size_t begin = data.size();
|
||||
data.resize(begin + inst.length);
|
||||
if (inst.length != 0) {
|
||||
std::memcpy(dmlc::BeginPtr(data) + begin, inst.data,
|
||||
sizeof(Entry) * inst.length);
|
||||
auto& data_vec = data.HostVector();
|
||||
auto& offset_vec = offset.HostVector();
|
||||
offset_vec.push_back(offset_vec.back() + inst.size());
|
||||
|
||||
size_t begin = data_vec.size();
|
||||
data_vec.resize(begin + inst.size());
|
||||
if (inst.size() != 0) {
|
||||
std::memcpy(dmlc::BeginPtr(data_vec) + begin, inst.data(),
|
||||
sizeof(Entry) * inst.size());
|
||||
}
|
||||
}
|
||||
|
||||
size_t Size() { return offset.size() - 1; }
|
||||
size_t Size() { return offset.Size() - 1; }
|
||||
};
|
||||
|
||||
class BatchIteratorImpl {
|
||||
public:
|
||||
virtual ~BatchIteratorImpl() {}
|
||||
virtual BatchIteratorImpl* Clone() = 0;
|
||||
virtual const SparsePage& operator*() const = 0;
|
||||
virtual void operator++() = 0;
|
||||
virtual bool AtEnd() const = 0;
|
||||
};
|
||||
|
||||
class BatchIterator {
|
||||
public:
|
||||
using iterator_category = std::forward_iterator_tag;
|
||||
explicit BatchIterator(BatchIteratorImpl* impl) { impl_.reset(impl); }
|
||||
|
||||
BatchIterator(const BatchIterator& other) {
|
||||
if (other.impl_) {
|
||||
impl_.reset(other.impl_->Clone());
|
||||
} else {
|
||||
impl_.reset();
|
||||
}
|
||||
}
|
||||
|
||||
void operator++() {
|
||||
CHECK(impl_ != nullptr);
|
||||
++(*impl_);
|
||||
}
|
||||
|
||||
const SparsePage& operator*() const {
|
||||
CHECK(impl_ != nullptr);
|
||||
return *(*impl_);
|
||||
}
|
||||
|
||||
bool operator!=(const BatchIterator& rhs) const {
|
||||
CHECK(impl_ != nullptr);
|
||||
return !impl_->AtEnd();
|
||||
}
|
||||
|
||||
bool AtEnd() const {
|
||||
CHECK(impl_ != nullptr);
|
||||
return impl_->AtEnd();
|
||||
}
|
||||
|
||||
private:
|
||||
std::unique_ptr<BatchIteratorImpl> impl_;
|
||||
};
|
||||
|
||||
class BatchSet {
|
||||
public:
|
||||
explicit BatchSet(BatchIterator begin_iter) : begin_iter_(begin_iter) {}
|
||||
BatchIterator begin() { return begin_iter_; }
|
||||
BatchIterator end() { return BatchIterator(nullptr); }
|
||||
|
||||
private:
|
||||
BatchIterator begin_iter_;
|
||||
};
|
||||
|
||||
/*!
|
||||
* \brief This is data structure that user can pass to DMatrix::Create
|
||||
@@ -317,32 +422,17 @@ class DMatrix {
|
||||
virtual MetaInfo& Info() = 0;
|
||||
/*! \brief meta information of the dataset */
|
||||
virtual const MetaInfo& Info() const = 0;
|
||||
/*!
|
||||
* \brief get the row iterator, reset to beginning position
|
||||
* \note Only either RowIterator or column Iterator can be active.
|
||||
/**
|
||||
* \brief Gets row batches. Use range based for loop over BatchSet to access individual batches.
|
||||
*/
|
||||
virtual dmlc::DataIter<SparsePage>* RowIterator() = 0;
|
||||
/*!\brief get column iterator, reset to the beginning position */
|
||||
virtual dmlc::DataIter<SparsePage>* ColIterator() = 0;
|
||||
/*!
|
||||
* \brief check if column access is supported, if not, initialize column access.
|
||||
* \param max_row_perbatch auxiliary information, maximum row used in each column batch.
|
||||
* this is a hint information that can be ignored by the implementation.
|
||||
* \param sorted If column features should be in sorted order
|
||||
* \return Number of column blocks in the column access.
|
||||
*/
|
||||
virtual void InitColAccess(size_t max_row_perbatch, bool sorted) = 0;
|
||||
virtual BatchSet GetRowBatches() = 0;
|
||||
virtual BatchSet GetSortedColumnBatches() = 0;
|
||||
virtual BatchSet GetColumnBatches() = 0;
|
||||
// the following are column meta data, should be able to answer them fast.
|
||||
/*! \return whether column access is enabled */
|
||||
virtual bool HaveColAccess(bool sorted) const = 0;
|
||||
/*! \return Whether the data columns single column block. */
|
||||
virtual bool SingleColBlock() const = 0;
|
||||
/*! \brief get number of non-missing entries in column */
|
||||
virtual size_t GetColSize(size_t cidx) const = 0;
|
||||
/*! \brief get column density */
|
||||
virtual float GetColDensity(size_t cidx) const = 0;
|
||||
/*! \return reference of buffered rowset, in column access */
|
||||
virtual const RowSet& BufferedRowset() const = 0;
|
||||
virtual float GetColDensity(size_t cidx) = 0;
|
||||
/*! \brief virtual destructor */
|
||||
virtual ~DMatrix() = default;
|
||||
/*!
|
||||
@@ -389,12 +479,6 @@ class DMatrix {
|
||||
*/
|
||||
static DMatrix* Create(dmlc::Parser<uint32_t>* parser,
|
||||
const std::string& cache_prefix = "");
|
||||
|
||||
private:
|
||||
// allow learner class to access this field.
|
||||
friend class LearnerImpl;
|
||||
/*! \brief public field to back ref cached matrix. */
|
||||
LearnerImpl* cache_learner_ptr_{nullptr};
|
||||
};
|
||||
|
||||
// implementation of inline functions
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#include <rabit/rabit.h>
|
||||
#include <utility>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "./base.h"
|
||||
@@ -178,6 +179,12 @@ class Learner : public rabit::Serializable {
|
||||
*/
|
||||
static Learner* Create(const std::vector<std::shared_ptr<DMatrix> >& cache_data);
|
||||
|
||||
/*!
|
||||
* \brief Get configuration arguments currently stored by the learner
|
||||
* \return Key-value pairs representing configuration arguments
|
||||
*/
|
||||
virtual const std::map<std::string, std::string>& GetConfigurationArguments() const = 0;
|
||||
|
||||
protected:
|
||||
/*! \brief internal base score of the model */
|
||||
bst_float base_score_;
|
||||
|
||||
@@ -44,7 +44,7 @@ class ObjFunction {
|
||||
* \param iteration current iteration number.
|
||||
* \param out_gpair output of get gradient, saves gradient and second order gradient in
|
||||
*/
|
||||
virtual void GetGradient(HostDeviceVector<bst_float>* preds,
|
||||
virtual void GetGradient(const HostDeviceVector<bst_float>& preds,
|
||||
const MetaInfo& info,
|
||||
int iteration,
|
||||
HostDeviceVector<GradientPair>* out_gpair) = 0;
|
||||
|
||||
@@ -574,14 +574,14 @@ inline void RegTree::FVec::Init(size_t size) {
|
||||
}
|
||||
|
||||
inline void RegTree::FVec::Fill(const SparsePage::Inst& inst) {
|
||||
for (bst_uint i = 0; i < inst.length; ++i) {
|
||||
for (bst_uint i = 0; i < inst.size(); ++i) {
|
||||
if (inst[i].index >= data_.size()) continue;
|
||||
data_[inst[i].index].fvalue = inst[i].fvalue;
|
||||
}
|
||||
}
|
||||
|
||||
inline void RegTree::FVec::Drop(const SparsePage::Inst& inst) {
|
||||
for (bst_uint i = 0; i < inst.length; ++i) {
|
||||
for (bst_uint i = 0; i < inst.size(); ++i) {
|
||||
if (inst[i].index >= data_.size()) continue;
|
||||
data_[inst[i].index].flag = -1;
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost-jvm</artifactId>
|
||||
<version>0.80</version>
|
||||
<version>0.81</version>
|
||||
<packaging>pom</packaging>
|
||||
<name>XGBoost JVM Package</name>
|
||||
<description>JVM Package for XGBoost</description>
|
||||
@@ -23,14 +23,19 @@
|
||||
<email>codingcat@apache.org</email>
|
||||
</developer>
|
||||
</developers>
|
||||
<scm>
|
||||
<connection>scm:git:git:/github.com/dmlc/xgboost.git</connection>
|
||||
<developerConnection>scm:git:ssh://github.com/dmlc/xgboost.git</developerConnection>
|
||||
<url>https://github.com/dmlc/xgboost</url>
|
||||
</scm>
|
||||
<properties>
|
||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
|
||||
<maven.compiler.source>1.7</maven.compiler.source>
|
||||
<maven.compiler.target>1.7</maven.compiler.target>
|
||||
<flink.version>0.10.2</flink.version>
|
||||
<spark.version>2.3.0</spark.version>
|
||||
<scala.version>2.11.8</scala.version>
|
||||
<flink.version>1.5.0</flink.version>
|
||||
<spark.version>2.3.1</spark.version>
|
||||
<scala.version>2.11.12</scala.version>
|
||||
<scala.binary.version>2.11</scala.binary.version>
|
||||
</properties>
|
||||
<repositories>
|
||||
@@ -314,6 +319,7 @@
|
||||
<version>2.19.1</version>
|
||||
<configuration>
|
||||
<skipTests>false</skipTests>
|
||||
<useSystemClassLoader>false</useSystemClassLoader>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
|
||||
@@ -6,10 +6,10 @@
|
||||
<parent>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost-jvm</artifactId>
|
||||
<version>0.80</version>
|
||||
<version>0.81</version>
|
||||
</parent>
|
||||
<artifactId>xgboost4j-example</artifactId>
|
||||
<version>0.80</version>
|
||||
<version>0.81</version>
|
||||
<packaging>jar</packaging>
|
||||
<build>
|
||||
<plugins>
|
||||
@@ -26,7 +26,7 @@
|
||||
<dependency>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost4j-spark</artifactId>
|
||||
<version>0.80</version>
|
||||
<version>0.81</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.spark</groupId>
|
||||
@@ -37,7 +37,7 @@
|
||||
<dependency>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost4j-flink</artifactId>
|
||||
<version>0.80</version>
|
||||
<version>0.81</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
|
||||
@@ -31,7 +31,7 @@ object SparkMLlibPipeline {
|
||||
|
||||
def main(args: Array[String]): Unit = {
|
||||
|
||||
if (args.length != 1) {
|
||||
if (args.length != 3) {
|
||||
println("Usage: SparkMLlibPipeline input_path native_model_path pipeline_model_path")
|
||||
sys.exit(1)
|
||||
}
|
||||
@@ -79,6 +79,8 @@ object SparkMLlibPipeline {
|
||||
"num_workers" -> 2
|
||||
)
|
||||
)
|
||||
booster.setFeaturesCol("features")
|
||||
booster.setLabelCol("classIndex")
|
||||
val labelConverter = new IndexToString()
|
||||
.setInputCol("prediction")
|
||||
.setOutputCol("realLabel")
|
||||
@@ -94,6 +96,8 @@ object SparkMLlibPipeline {
|
||||
|
||||
// Model evaluation
|
||||
val evaluator = new MulticlassClassificationEvaluator()
|
||||
evaluator.setLabelCol("classIndex")
|
||||
evaluator.setPredictionCol("prediction")
|
||||
val accuracy = evaluator.evaluate(prediction)
|
||||
println("The model accuracy is : " + accuracy)
|
||||
|
||||
|
||||
@@ -6,10 +6,10 @@
|
||||
<parent>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost-jvm</artifactId>
|
||||
<version>0.80</version>
|
||||
<version>0.81</version>
|
||||
</parent>
|
||||
<artifactId>xgboost4j-flink</artifactId>
|
||||
<version>0.80</version>
|
||||
<version>0.81</version>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
@@ -26,7 +26,7 @@
|
||||
<dependency>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost4j</artifactId>
|
||||
<version>0.80</version>
|
||||
<version>0.81</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
@@ -48,6 +48,11 @@
|
||||
<artifactId>flink-ml_${scala.binary.version}</artifactId>
|
||||
<version>${flink.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-common</artifactId>
|
||||
<version>2.7.3</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
</project>
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
<parent>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost-jvm</artifactId>
|
||||
<version>0.80</version>
|
||||
<version>0.81</version>
|
||||
</parent>
|
||||
<artifactId>xgboost4j-spark</artifactId>
|
||||
<build>
|
||||
@@ -24,7 +24,7 @@
|
||||
<dependency>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost4j</artifactId>
|
||||
<version>0.80</version>
|
||||
<version>0.81</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.spark</groupId>
|
||||
|
||||
@@ -63,8 +63,9 @@ private[spark] class CheckpointManager(sc: SparkContext, checkpointPath: String)
|
||||
if (versions.nonEmpty) {
|
||||
val version = versions.max
|
||||
val fullPath = getPath(version)
|
||||
val inputStream = FileSystem.get(sc.hadoopConfiguration).open(new Path(fullPath))
|
||||
logger.info(s"Start training from previous booster at $fullPath")
|
||||
val booster = SXGBoost.loadModel(fullPath)
|
||||
val booster = SXGBoost.loadModel(inputStream)
|
||||
booster.booster.setVersion(version)
|
||||
booster
|
||||
} else {
|
||||
@@ -81,8 +82,9 @@ private[spark] class CheckpointManager(sc: SparkContext, checkpointPath: String)
|
||||
val fs = FileSystem.get(sc.hadoopConfiguration)
|
||||
val prevModelPaths = getExistingVersions.map(version => new Path(getPath(version)))
|
||||
val fullPath = getPath(checkpoint.getVersion)
|
||||
val outputStream = fs.create(new Path(fullPath), true)
|
||||
logger.info(s"Saving checkpoint model with version ${checkpoint.getVersion} to $fullPath")
|
||||
checkpoint.saveModel(fullPath)
|
||||
checkpoint.saveModel(outputStream)
|
||||
prevModelPaths.foreach(path => fs.delete(path, true))
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ package ml.dmlc.xgboost4j.scala.spark
|
||||
import java.io.File
|
||||
import java.nio.file.Files
|
||||
|
||||
import scala.collection.mutable
|
||||
import scala.collection.{AbstractIterator, mutable}
|
||||
import scala.util.Random
|
||||
|
||||
import ml.dmlc.xgboost4j.java.{IRabitTracker, Rabit, XGBoostError, RabitTracker => PyRabitTracker}
|
||||
@@ -31,6 +31,7 @@ import org.apache.commons.io.FileUtils
|
||||
import org.apache.commons.logging.LogFactory
|
||||
import org.apache.spark.rdd.RDD
|
||||
import org.apache.spark.{SparkContext, SparkParallelismTracker, TaskContext}
|
||||
import org.apache.spark.sql.SparkSession
|
||||
|
||||
|
||||
/**
|
||||
@@ -52,6 +53,17 @@ object TrackerConf {
|
||||
def apply(): TrackerConf = TrackerConf(0L, "python")
|
||||
}
|
||||
|
||||
/**
|
||||
* Traing data group in a RDD partition.
|
||||
* @param groupId The group id
|
||||
* @param points Array of XGBLabeledPoint within the same group.
|
||||
* @param isEdgeGroup whether it is a frist or last group in a RDD partition.
|
||||
*/
|
||||
private[spark] case class XGBLabeledPointGroup(
|
||||
groupId: Int,
|
||||
points: Array[XGBLabeledPoint],
|
||||
isEdgeGroup: Boolean)
|
||||
|
||||
object XGBoost extends Serializable {
|
||||
private val logger = LogFactory.getLog("XGBoostSpark")
|
||||
|
||||
@@ -73,78 +85,67 @@ object XGBoost extends Serializable {
|
||||
}
|
||||
}
|
||||
|
||||
private def fromBaseMarginsToArray(baseMargins: Iterator[Float]): Option[Array[Float]] = {
|
||||
val builder = new mutable.ArrayBuilder.ofFloat()
|
||||
var nTotal = 0
|
||||
var nUndefined = 0
|
||||
while (baseMargins.hasNext) {
|
||||
nTotal += 1
|
||||
val baseMargin = baseMargins.next()
|
||||
if (baseMargin.isNaN) {
|
||||
nUndefined += 1 // don't waste space for all-NaNs.
|
||||
} else {
|
||||
builder += baseMargin
|
||||
private def removeMissingValuesWithGroup(
|
||||
xgbLabelPointGroups: Iterator[Array[XGBLabeledPoint]],
|
||||
missing: Float): Iterator[Array[XGBLabeledPoint]] = {
|
||||
if (!missing.isNaN) {
|
||||
xgbLabelPointGroups.map {
|
||||
labeledPoints => XGBoost.removeMissingValues(labeledPoints.iterator, missing).toArray
|
||||
}
|
||||
}
|
||||
if (nUndefined == nTotal) {
|
||||
None
|
||||
} else if (nUndefined == 0) {
|
||||
Some(builder.result())
|
||||
} else {
|
||||
throw new IllegalArgumentException(
|
||||
s"Encountered a partition with $nUndefined NaN base margin values. " +
|
||||
s"If you want to specify base margin, ensure all values are non-NaN.")
|
||||
xgbLabelPointGroups
|
||||
}
|
||||
}
|
||||
|
||||
private[spark] def buildDistributedBoosters(
|
||||
data: RDD[XGBLabeledPoint],
|
||||
private def getCacheDirName(useExternalMemory: Boolean): Option[String] = {
|
||||
val taskId = TaskContext.getPartitionId().toString
|
||||
if (useExternalMemory) {
|
||||
val dir = Files.createTempDirectory(s"${TaskContext.get().stageId()}-cache-$taskId")
|
||||
Some(dir.toAbsolutePath.toString)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
private def buildDistributedBooster(
|
||||
watches: Watches,
|
||||
params: Map[String, Any],
|
||||
rabitEnv: java.util.Map[String, String],
|
||||
round: Int,
|
||||
obj: ObjectiveTrait,
|
||||
eval: EvalTrait,
|
||||
useExternalMemory: Boolean,
|
||||
missing: Float,
|
||||
prevBooster: Booster
|
||||
): RDD[(Booster, Map[String, Array[Float]])] = {
|
||||
prevBooster: Booster)
|
||||
: Iterator[(Booster, Map[String, Array[Float]])] = {
|
||||
|
||||
val partitionedBaseMargin = data.map(_.baseMargin)
|
||||
// to workaround the empty partitions in training dataset,
|
||||
// this might not be the best efficient implementation, see
|
||||
// (https://github.com/dmlc/xgboost/issues/1277)
|
||||
data.zipPartitions(partitionedBaseMargin) { (labeledPoints, baseMargins) =>
|
||||
if (labeledPoints.isEmpty) {
|
||||
throw new XGBoostError(
|
||||
s"detected an empty partition in the training data, partition ID:" +
|
||||
s" ${TaskContext.getPartitionId()}")
|
||||
}
|
||||
val taskId = TaskContext.getPartitionId().toString
|
||||
val cacheDirName = if (useExternalMemory) {
|
||||
val dir = Files.createTempDirectory(s"${TaskContext.get().stageId()}-cache-$taskId")
|
||||
Some(dir.toAbsolutePath.toString)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
rabitEnv.put("DMLC_TASK_ID", taskId)
|
||||
Rabit.init(rabitEnv)
|
||||
val watches = Watches(params,
|
||||
removeMissingValues(labeledPoints, missing),
|
||||
fromBaseMarginsToArray(baseMargins), cacheDirName)
|
||||
if (watches.train.rowNum == 0) {
|
||||
throw new XGBoostError(
|
||||
s"detected an empty partition in the training data, partition ID:" +
|
||||
s" ${TaskContext.getPartitionId()}")
|
||||
}
|
||||
val taskId = TaskContext.getPartitionId().toString
|
||||
rabitEnv.put("DMLC_TASK_ID", taskId)
|
||||
Rabit.init(rabitEnv)
|
||||
|
||||
try {
|
||||
val numEarlyStoppingRounds = params.get("num_early_stopping_rounds")
|
||||
.map(_.toString.toInt).getOrElse(0)
|
||||
val metrics = Array.tabulate(watches.size)(_ => Array.ofDim[Float](round))
|
||||
val booster = SXGBoost.train(watches.train, params, round,
|
||||
watches.toMap, metrics, obj, eval,
|
||||
earlyStoppingRound = numEarlyStoppingRounds, prevBooster)
|
||||
Iterator(booster -> watches.toMap.keys.zip(metrics).toMap)
|
||||
} finally {
|
||||
Rabit.shutdown()
|
||||
watches.delete()
|
||||
try {
|
||||
val numEarlyStoppingRounds = params.get("num_early_stopping_rounds")
|
||||
.map(_.toString.toInt).getOrElse(0)
|
||||
if (numEarlyStoppingRounds > 0) {
|
||||
if (!params.contains("maximize_evaluation_metrics")) {
|
||||
throw new IllegalArgumentException("maximize_evaluation_metrics has to be specified")
|
||||
}
|
||||
}
|
||||
}.cache()
|
||||
val metrics = Array.tabulate(watches.size)(_ => Array.ofDim[Float](round))
|
||||
val booster = SXGBoost.train(watches.train, params, round,
|
||||
watches.toMap, metrics, obj, eval,
|
||||
earlyStoppingRound = numEarlyStoppingRounds, prevBooster)
|
||||
Iterator(booster -> watches.toMap.keys.zip(metrics).toMap)
|
||||
} finally {
|
||||
Rabit.shutdown()
|
||||
watches.delete()
|
||||
}
|
||||
}
|
||||
|
||||
private def overrideParamsAccordingToTaskCPUs(
|
||||
@@ -174,6 +175,38 @@ object XGBoost extends Serializable {
|
||||
tracker
|
||||
}
|
||||
|
||||
/**
|
||||
* Check to see if Spark expects SSL encryption (`spark.ssl.enabled` set to true).
|
||||
* If so, throw an exception unless this safety measure has been explicitly overridden
|
||||
* via conf `xgboost.spark.ignoreSsl`.
|
||||
*
|
||||
* @param sc SparkContext for the training dataset. When looking for the confs, this method
|
||||
* first checks for an active SparkSession. If one is not available, it falls back
|
||||
* to this SparkContext.
|
||||
*/
|
||||
private def validateSparkSslConf(sc: SparkContext): Unit = {
|
||||
val (sparkSslEnabled: Boolean, xgboostSparkIgnoreSsl: Boolean) =
|
||||
SparkSession.getActiveSession match {
|
||||
case Some(ss) =>
|
||||
(ss.conf.getOption("spark.ssl.enabled").getOrElse("false").toBoolean,
|
||||
ss.conf.getOption("xgboost.spark.ignoreSsl").getOrElse("false").toBoolean)
|
||||
case None =>
|
||||
(sc.getConf.getBoolean("spark.ssl.enabled", false),
|
||||
sc.getConf.getBoolean("xgboost.spark.ignoreSsl", false))
|
||||
}
|
||||
if (sparkSslEnabled) {
|
||||
if (xgboostSparkIgnoreSsl) {
|
||||
logger.warn(s"spark-xgboost is being run without encrypting data in transit! " +
|
||||
s"Spark Conf spark.ssl.enabled=true was overridden with xgboost.spark.ignoreSsl=true.")
|
||||
} else {
|
||||
throw new Exception("xgboost-spark found spark.ssl.enabled=true to encrypt data " +
|
||||
"in transit, but xgboost-spark sends non-encrypted data over the wire for efficiency. " +
|
||||
"To override this protection and still use xgboost-spark at your own risk, " +
|
||||
"you can set the SparkSession conf to use xgboost.spark.ignoreSsl=true.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @return A tuple of the booster and the metrics used to build training summary
|
||||
*/
|
||||
@@ -186,16 +219,18 @@ object XGBoost extends Serializable {
|
||||
obj: ObjectiveTrait = null,
|
||||
eval: EvalTrait = null,
|
||||
useExternalMemory: Boolean = false,
|
||||
missing: Float = Float.NaN): (Booster, Map[String, Array[Float]]) = {
|
||||
missing: Float = Float.NaN,
|
||||
hasGroup: Boolean = false): (Booster, Map[String, Array[Float]]) = {
|
||||
validateSparkSslConf(trainingData.context)
|
||||
if (params.contains("tree_method")) {
|
||||
require(params("tree_method") != "hist", "xgboost4j-spark does not support fast histogram" +
|
||||
" for now")
|
||||
}
|
||||
require(nWorkers > 0, "you must specify more than 0 workers")
|
||||
if (obj != null) {
|
||||
require(params.get("obj_type").isDefined, "parameter \"obj_type\" is not defined," +
|
||||
" you have to specify the objective type as classification or regression with a" +
|
||||
" customized objective function")
|
||||
require(params.get("objective_type").isDefined, "parameter \"objective_type\" is not" +
|
||||
" defined, you have to specify the objective type as classification or regression" +
|
||||
" with a customized objective function")
|
||||
}
|
||||
val trackerConf = params.get("tracker_conf") match {
|
||||
case None => TrackerConf()
|
||||
@@ -210,7 +245,6 @@ object XGBoost extends Serializable {
|
||||
" an instance of Long.")
|
||||
}
|
||||
val (checkpointPath, checkpointInterval) = CheckpointManager.extractParams(params)
|
||||
val partitionedData = repartitionForTraining(trainingData, nWorkers)
|
||||
|
||||
val sc = trainingData.sparkContext
|
||||
val checkpointManager = new CheckpointManager(sc, checkpointPath)
|
||||
@@ -224,9 +258,29 @@ object XGBoost extends Serializable {
|
||||
try {
|
||||
val overriddenParams = overrideParamsAccordingToTaskCPUs(params, sc)
|
||||
val parallelismTracker = new SparkParallelismTracker(sc, timeoutRequestWorkers, nWorkers)
|
||||
val boostersAndMetrics = buildDistributedBoosters(partitionedData, overriddenParams,
|
||||
tracker.getWorkerEnvs, checkpointRound, obj, eval, useExternalMemory, missing,
|
||||
prevBooster)
|
||||
val rabitEnv = tracker.getWorkerEnvs
|
||||
val boostersAndMetrics = hasGroup match {
|
||||
case true => {
|
||||
val partitionedData = repartitionForTrainingGroup(trainingData, nWorkers)
|
||||
partitionedData.mapPartitions(labeledPointGroups => {
|
||||
val watches = Watches.buildWatchesWithGroup(overriddenParams,
|
||||
removeMissingValuesWithGroup(labeledPointGroups, missing),
|
||||
getCacheDirName(useExternalMemory))
|
||||
buildDistributedBooster(watches, overriddenParams, rabitEnv, checkpointRound,
|
||||
obj, eval, prevBooster)
|
||||
}).cache()
|
||||
}
|
||||
case false => {
|
||||
val partitionedData = repartitionForTraining(trainingData, nWorkers)
|
||||
partitionedData.mapPartitions(labeledPoints => {
|
||||
val watches = Watches.buildWatches(overriddenParams,
|
||||
removeMissingValues(labeledPoints, missing),
|
||||
getCacheDirName(useExternalMemory))
|
||||
buildDistributedBooster(watches, overriddenParams, rabitEnv, checkpointRound,
|
||||
obj, eval, prevBooster)
|
||||
}).cache()
|
||||
}
|
||||
}
|
||||
val sparkJobThread = new Thread() {
|
||||
override def run() {
|
||||
// force the job
|
||||
@@ -244,13 +298,12 @@ object XGBoost extends Serializable {
|
||||
checkpointManager.updateCheckpoint(prevBooster)
|
||||
}
|
||||
(booster, metrics)
|
||||
} finally {
|
||||
tracker.stop()
|
||||
}
|
||||
} finally {
|
||||
tracker.stop()
|
||||
}
|
||||
}.last
|
||||
}
|
||||
|
||||
|
||||
private[spark] def repartitionForTraining(trainingData: RDD[XGBLabeledPoint], nWorkers: Int) = {
|
||||
if (trainingData.getNumPartitions != nWorkers) {
|
||||
logger.info(s"repartitioning training set to $nWorkers partitions")
|
||||
@@ -260,6 +313,31 @@ object XGBoost extends Serializable {
|
||||
}
|
||||
}
|
||||
|
||||
private[spark] def repartitionForTrainingGroup(
|
||||
trainingData: RDD[XGBLabeledPoint], nWorkers: Int): RDD[Array[XGBLabeledPoint]] = {
|
||||
val normalGroups: RDD[Array[XGBLabeledPoint]] = trainingData.mapPartitions(
|
||||
// LabeledPointGroupIterator returns (Boolean, Array[XGBLabeledPoint])
|
||||
new LabeledPointGroupIterator(_)).filter(!_.isEdgeGroup).map(_.points)
|
||||
|
||||
// edge groups with partition id.
|
||||
val edgeGroups: RDD[(Int, XGBLabeledPointGroup)] = trainingData.mapPartitions(
|
||||
new LabeledPointGroupIterator(_)).filter(_.isEdgeGroup).map(
|
||||
group => (TaskContext.getPartitionId(), group))
|
||||
|
||||
// group chunks from different partitions together by group id in XGBLabeledPoint.
|
||||
// use groupBy instead of aggregateBy since all groups within a partition have unique groud ids.
|
||||
val stitchedGroups: RDD[Array[XGBLabeledPoint]] = edgeGroups.groupBy(_._2.groupId).map(
|
||||
groups => {
|
||||
val it: Iterable[(Int, XGBLabeledPointGroup)] = groups._2
|
||||
// sorted by partition id and merge list of Array[XGBLabeledPoint] into one array
|
||||
it.toArray.sortBy(_._1).map(_._2.points).flatten
|
||||
})
|
||||
|
||||
var allGroups = normalGroups.union(stitchedGroups)
|
||||
logger.info(s"repartitioning training group set to $nWorkers partitions")
|
||||
allGroups.repartition(nWorkers)
|
||||
}
|
||||
|
||||
private def postTrackerReturnProcessing(
|
||||
trackerReturnVal: Int,
|
||||
distributedBoostersAndMetrics: RDD[(Booster, Map[String, Array[Float]])],
|
||||
@@ -268,6 +346,9 @@ object XGBoost extends Serializable {
|
||||
// Copies of the final booster and the corresponding metrics
|
||||
// reside in each partition of the `distributedBoostersAndMetrics`.
|
||||
// Any of them can be used to create the model.
|
||||
// it's safe to block here forever, as the tracker has returned successfully, and the Spark
|
||||
// job should have finished, there is no reason for the thread cannot return
|
||||
sparkJobThread.join()
|
||||
val (booster, metrics) = distributedBoostersAndMetrics.first()
|
||||
distributedBoostersAndMetrics.unpersist(false)
|
||||
(booster, metrics)
|
||||
@@ -287,9 +368,9 @@ object XGBoost extends Serializable {
|
||||
}
|
||||
|
||||
private class Watches private(
|
||||
val train: DMatrix,
|
||||
val test: DMatrix,
|
||||
private val cacheDirName: Option[String]) {
|
||||
val train: DMatrix,
|
||||
val test: DMatrix,
|
||||
private val cacheDirName: Option[String]) {
|
||||
|
||||
def toMap: Map[String, DMatrix] = Map("train" -> train, "test" -> test)
|
||||
.filter { case (_, matrix) => matrix.rowNum > 0 }
|
||||
@@ -308,59 +389,152 @@ private class Watches private(
|
||||
|
||||
private object Watches {
|
||||
|
||||
def buildGroups(groups: Seq[Int]): Seq[Int] = {
|
||||
val output = mutable.ArrayBuffer.empty[Int]
|
||||
var count = 1
|
||||
var lastGroup = groups.head
|
||||
for (group <- groups.tail) {
|
||||
if (group != lastGroup) {
|
||||
lastGroup = group
|
||||
output += count
|
||||
count = 1
|
||||
private def fromBaseMarginsToArray(baseMargins: Iterator[Float]): Option[Array[Float]] = {
|
||||
val builder = new mutable.ArrayBuilder.ofFloat()
|
||||
var nTotal = 0
|
||||
var nUndefined = 0
|
||||
while (baseMargins.hasNext) {
|
||||
nTotal += 1
|
||||
val baseMargin = baseMargins.next()
|
||||
if (baseMargin.isNaN) {
|
||||
nUndefined += 1 // don't waste space for all-NaNs.
|
||||
} else {
|
||||
count += 1
|
||||
builder += baseMargin
|
||||
}
|
||||
}
|
||||
output += count
|
||||
output
|
||||
if (nUndefined == nTotal) {
|
||||
None
|
||||
} else if (nUndefined == 0) {
|
||||
Some(builder.result())
|
||||
} else {
|
||||
throw new IllegalArgumentException(
|
||||
s"Encountered a partition with $nUndefined NaN base margin values. " +
|
||||
s"If you want to specify base margin, ensure all values are non-NaN.")
|
||||
}
|
||||
}
|
||||
|
||||
def apply(
|
||||
def buildWatches(
|
||||
params: Map[String, Any],
|
||||
labeledPoints: Iterator[XGBLabeledPoint],
|
||||
baseMarginsOpt: Option[Array[Float]],
|
||||
cacheDirName: Option[String]): Watches = {
|
||||
val trainTestRatio = params.get("train_test_ratio").map(_.toString.toDouble).getOrElse(1.0)
|
||||
val seed = params.get("seed").map(_.toString.toLong).getOrElse(System.nanoTime())
|
||||
val r = new Random(seed)
|
||||
val testPoints = mutable.ArrayBuffer.empty[XGBLabeledPoint]
|
||||
val trainBaseMargins = new mutable.ArrayBuilder.ofFloat
|
||||
val testBaseMargins = new mutable.ArrayBuilder.ofFloat
|
||||
val trainPoints = labeledPoints.filter { labeledPoint =>
|
||||
val accepted = r.nextDouble() <= trainTestRatio
|
||||
if (!accepted) {
|
||||
testPoints += labeledPoint
|
||||
testBaseMargins += labeledPoint.baseMargin
|
||||
} else {
|
||||
trainBaseMargins += labeledPoint.baseMargin
|
||||
}
|
||||
accepted
|
||||
}
|
||||
val trainMatrix = new DMatrix(trainPoints, cacheDirName.map(_ + "/train").orNull)
|
||||
val testMatrix = new DMatrix(testPoints.iterator, cacheDirName.map(_ + "/test").orNull)
|
||||
|
||||
val trainMargin = fromBaseMarginsToArray(trainBaseMargins.result().iterator)
|
||||
val testMargin = fromBaseMarginsToArray(testBaseMargins.result().iterator)
|
||||
if (trainMargin.isDefined) trainMatrix.setBaseMargin(trainMargin.get)
|
||||
if (testMargin.isDefined) testMatrix.setBaseMargin(testMargin.get)
|
||||
|
||||
new Watches(trainMatrix, testMatrix, cacheDirName)
|
||||
}
|
||||
|
||||
def buildWatchesWithGroup(
|
||||
params: Map[String, Any],
|
||||
labeledPointGroups: Iterator[Array[XGBLabeledPoint]],
|
||||
cacheDirName: Option[String]): Watches = {
|
||||
val trainTestRatio = params.get("train_test_ratio").map(_.toString.toDouble).getOrElse(1.0)
|
||||
val seed = params.get("seed").map(_.toString.toLong).getOrElse(System.nanoTime())
|
||||
val r = new Random(seed)
|
||||
val testPoints = mutable.ArrayBuilder.make[XGBLabeledPoint]
|
||||
val trainBaseMargins = new mutable.ArrayBuilder.ofFloat
|
||||
val testBaseMargins = new mutable.ArrayBuilder.ofFloat
|
||||
val trainGroups = new mutable.ArrayBuilder.ofInt
|
||||
val testGroups = new mutable.ArrayBuilder.ofInt
|
||||
|
||||
val trainLabelPointGroups = labeledPointGroups.filter { labeledPointGroup =>
|
||||
val accepted = r.nextDouble() <= trainTestRatio
|
||||
if (!accepted) {
|
||||
labeledPointGroup.foreach(labeledPoint => {
|
||||
testPoints += labeledPoint
|
||||
testBaseMargins += labeledPoint.baseMargin
|
||||
})
|
||||
testGroups += labeledPointGroup.length
|
||||
} else {
|
||||
labeledPointGroup.foreach(trainBaseMargins += _.baseMargin)
|
||||
trainGroups += labeledPointGroup.length
|
||||
}
|
||||
accepted
|
||||
}
|
||||
|
||||
val (trainIter1, trainIter2) = trainPoints.duplicate
|
||||
val trainMatrix = new DMatrix(trainIter1, cacheDirName.map(_ + "/train").orNull)
|
||||
val trainGroups = buildGroups(trainIter2.map(_.group).toSeq).toArray
|
||||
trainMatrix.setGroup(trainGroups)
|
||||
val trainPoints = trainLabelPointGroups.flatMap(_.iterator)
|
||||
val trainMatrix = new DMatrix(trainPoints, cacheDirName.map(_ + "/train").orNull)
|
||||
trainMatrix.setGroup(trainGroups.result())
|
||||
|
||||
val testMatrix = new DMatrix(testPoints.iterator, cacheDirName.map(_ + "/test").orNull)
|
||||
val testMatrix = new DMatrix(testPoints.result().iterator, cacheDirName.map(_ + "/test").orNull)
|
||||
if (trainTestRatio < 1.0) {
|
||||
val testGroups = buildGroups(testPoints.map(_.group)).toArray
|
||||
testMatrix.setGroup(testGroups)
|
||||
testMatrix.setGroup(testGroups.result())
|
||||
}
|
||||
|
||||
r.setSeed(seed)
|
||||
for (baseMargins <- baseMarginsOpt) {
|
||||
val (trainMargin, testMargin) = baseMargins.partition(_ => r.nextDouble() <= trainTestRatio)
|
||||
trainMatrix.setBaseMargin(trainMargin)
|
||||
testMatrix.setBaseMargin(testMargin)
|
||||
}
|
||||
val trainMargin = fromBaseMarginsToArray(trainBaseMargins.result().iterator)
|
||||
val testMargin = fromBaseMarginsToArray(testBaseMargins.result().iterator)
|
||||
if (trainMargin.isDefined) trainMatrix.setBaseMargin(trainMargin.get)
|
||||
if (testMargin.isDefined) testMatrix.setBaseMargin(testMargin.get)
|
||||
|
||||
new Watches(trainMatrix, testMatrix, cacheDirName)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Within each RDD partition, group the <code>XGBLabeledPoint</code> by group id.</p>
|
||||
* And the first and the last groups may not have all the items due to the data partition.
|
||||
* <code>LabeledPointGroupIterator</code> orginaizes data in a tuple format:
|
||||
* (isFistGroup || isLastGroup, Array[XGBLabeledPoint]).</p>
|
||||
* The edge groups across partitions can be stitched together later.
|
||||
* @param base collection of <code>XGBLabeledPoint</code>
|
||||
*/
|
||||
private[spark] class LabeledPointGroupIterator(base: Iterator[XGBLabeledPoint])
|
||||
extends AbstractIterator[XGBLabeledPointGroup] {
|
||||
|
||||
private var firstPointOfNextGroup: XGBLabeledPoint = null
|
||||
private var isNewGroup = false
|
||||
|
||||
override def hasNext: Boolean = {
|
||||
return base.hasNext || isNewGroup
|
||||
}
|
||||
|
||||
override def next(): XGBLabeledPointGroup = {
|
||||
val builder = mutable.ArrayBuilder.make[XGBLabeledPoint]
|
||||
var isFirstGroup = true
|
||||
if (firstPointOfNextGroup != null) {
|
||||
builder += firstPointOfNextGroup
|
||||
isFirstGroup = false
|
||||
}
|
||||
|
||||
isNewGroup = false
|
||||
while (!isNewGroup && base.hasNext) {
|
||||
val point = base.next()
|
||||
val groupId = if (firstPointOfNextGroup != null) firstPointOfNextGroup.group else point.group
|
||||
firstPointOfNextGroup = point
|
||||
if (point.group == groupId) {
|
||||
// add to current group
|
||||
builder += point
|
||||
} else {
|
||||
// start a new group
|
||||
isNewGroup = true
|
||||
}
|
||||
}
|
||||
|
||||
val isLastGroup = !isNewGroup
|
||||
val result = builder.result()
|
||||
val group = XGBLabeledPointGroup(result(0).group, result, isFirstGroup || isLastGroup)
|
||||
|
||||
group
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -130,6 +130,8 @@ class XGBoostClassifier (
|
||||
// setters for learning params
|
||||
def setObjective(value: String): this.type = set(objective, value)
|
||||
|
||||
def setObjectiveType(value: String): this.type = set(objectiveType, value)
|
||||
|
||||
def setBaseScore(value: Double): this.type = set(baseScore, value)
|
||||
|
||||
def setEvalMetric(value: String): this.type = set(evalMetric, value)
|
||||
@@ -138,6 +140,9 @@ class XGBoostClassifier (
|
||||
|
||||
def setNumEarlyStoppingRounds(value: Int): this.type = set(numEarlyStoppingRounds, value)
|
||||
|
||||
def setMaximizeEvaluationMetrics(value: Boolean): this.type =
|
||||
set(maximizeEvaluationMetrics, value)
|
||||
|
||||
def setCustomObj(value: ObjectiveTrait): this.type = set(customObj, value)
|
||||
|
||||
def setCustomEval(value: EvalTrait): this.type = set(customEval, value)
|
||||
@@ -160,6 +165,10 @@ class XGBoostClassifier (
|
||||
set(evalMetric, setupDefaultEvalMetric())
|
||||
}
|
||||
|
||||
if (isDefined(customObj) && $(customObj) != null) {
|
||||
set(objectiveType, "classification")
|
||||
}
|
||||
|
||||
val _numClasses = getNumClasses(dataset)
|
||||
if (isDefined(numClass) && $(numClass) != _numClasses) {
|
||||
throw new Exception("The number of classes in dataset doesn't match " +
|
||||
@@ -190,7 +199,7 @@ class XGBoostClassifier (
|
||||
// All non-null param maps in XGBoostClassifier are in derivedXGBParamMap.
|
||||
val (_booster, _metrics) = XGBoost.trainDistributed(instances, derivedXGBParamMap,
|
||||
$(numRound), $(numWorkers), $(customObj), $(customEval), $(useExternalMemory),
|
||||
$(missing))
|
||||
$(missing), hasGroup = false)
|
||||
val model = new XGBoostClassificationModel(uid, _numClasses, _booster)
|
||||
val summary = XGBoostTrainingSummary(_metrics)
|
||||
model.setSummary(summary)
|
||||
@@ -251,11 +260,11 @@ class XGBoostClassificationModel private[ml](
|
||||
override def predict(features: Vector): Double = {
|
||||
import DataUtils._
|
||||
val dm = new DMatrix(XGBoost.removeMissingValues(Iterator(features.asXGB), $(missing)))
|
||||
val probability = _booster.predict(data = dm)(0)
|
||||
val probability = _booster.predict(data = dm)(0).map(_.toDouble)
|
||||
if (numClasses == 2) {
|
||||
math.round(probability(0))
|
||||
} else {
|
||||
Vectors.dense(probability.map(_.toDouble)).argmax
|
||||
probability2prediction(Vectors.dense(probability))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -411,20 +420,15 @@ class XGBoostClassificationModel private[ml](
|
||||
}
|
||||
|
||||
val probabilityUDF = udf { probability: mutable.WrappedArray[Float] =>
|
||||
if (numClasses == 2) {
|
||||
Vectors.dense(Array(1 - probability(0), probability(0)).map(_.toDouble))
|
||||
} else {
|
||||
Vectors.dense(probability.map(_.toDouble).toArray)
|
||||
}
|
||||
val prob = probability.map(_.toDouble).toArray
|
||||
val probabilities = if (numClasses == 2) Array(1.0 - prob(0), prob(0)) else prob
|
||||
Vectors.dense(probabilities)
|
||||
}
|
||||
|
||||
val predictUDF = udf { probability: mutable.WrappedArray[Float] =>
|
||||
// From XGBoost probability to MLlib prediction
|
||||
val probabilities = if (numClasses == 2) {
|
||||
Array(1 - probability(0), probability(0)).map(_.toDouble)
|
||||
} else {
|
||||
probability.map(_.toDouble).toArray
|
||||
}
|
||||
val prob = probability.map(_.toDouble).toArray
|
||||
val probabilities = if (numClasses == 2) Array(1.0 - prob(0), prob(0)) else prob
|
||||
probability2prediction(Vectors.dense(probabilities))
|
||||
}
|
||||
|
||||
@@ -516,3 +520,4 @@ object XGBoostClassificationModel extends MLReadable[XGBoostClassificationModel]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -130,6 +130,8 @@ class XGBoostRegressor (
|
||||
// setters for learning params
|
||||
def setObjective(value: String): this.type = set(objective, value)
|
||||
|
||||
def setObjectiveType(value: String): this.type = set(objectiveType, value)
|
||||
|
||||
def setBaseScore(value: Double): this.type = set(baseScore, value)
|
||||
|
||||
def setEvalMetric(value: String): this.type = set(evalMetric, value)
|
||||
@@ -138,6 +140,9 @@ class XGBoostRegressor (
|
||||
|
||||
def setNumEarlyStoppingRounds(value: Int): this.type = set(numEarlyStoppingRounds, value)
|
||||
|
||||
def setMaximizeEvaluationMetrics(value: Boolean): this.type =
|
||||
set(maximizeEvaluationMetrics, value)
|
||||
|
||||
def setCustomObj(value: ObjectiveTrait): this.type = set(customObj, value)
|
||||
|
||||
def setCustomEval(value: EvalTrait): this.type = set(customEval, value)
|
||||
@@ -158,6 +163,10 @@ class XGBoostRegressor (
|
||||
set(evalMetric, setupDefaultEvalMetric())
|
||||
}
|
||||
|
||||
if (isDefined(customObj) && $(customObj) != null) {
|
||||
set(objectiveType, "regression")
|
||||
}
|
||||
|
||||
val weight = if (!isDefined(weightCol) || $(weightCol).isEmpty) lit(1.0) else col($(weightCol))
|
||||
val baseMargin = if (!isDefined(baseMarginCol) || $(baseMarginCol).isEmpty) {
|
||||
lit(Float.NaN)
|
||||
@@ -185,7 +194,7 @@ class XGBoostRegressor (
|
||||
// All non-null param maps in XGBoostRegressor are in derivedXGBParamMap.
|
||||
val (_booster, _metrics) = XGBoost.trainDistributed(instances, derivedXGBParamMap,
|
||||
$(numRound), $(numWorkers), $(customObj), $(customEval), $(useExternalMemory),
|
||||
$(missing))
|
||||
$(missing), hasGroup = group != lit(-1))
|
||||
val model = new XGBoostRegressionModel(uid, _booster)
|
||||
val summary = XGBoostTrainingSummary(_metrics)
|
||||
model.setSummary(summary)
|
||||
|
||||
@@ -240,7 +240,7 @@ private[spark] trait BoosterParams extends Params {
|
||||
final val treeLimit = new IntParam(this, name = "treeLimit",
|
||||
doc = "number of trees used in the prediction; defaults to 0 (use all trees).")
|
||||
|
||||
final def getTreeLimit: Double = $(treeLimit)
|
||||
final def getTreeLimit: Int = $(treeLimit)
|
||||
|
||||
setDefault(eta -> 0.3, gamma -> 0, maxDepth -> 6,
|
||||
minChildWeight -> 1, maxDeltaStep -> 0,
|
||||
|
||||
@@ -100,7 +100,6 @@ class TrackerConfParam(
|
||||
override def jsonDecode(json: String): TrackerConf = {
|
||||
implicit val formats = DefaultFormats
|
||||
val parsedValue = parse(json)
|
||||
println(parsedValue.children)
|
||||
parsedValue.extract[TrackerConf]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -224,10 +224,10 @@ private[spark] trait ParamMapFuncs extends Params {
|
||||
def XGBoostToMLlibParams(xgboostParams: Map[String, Any]): Unit = {
|
||||
for ((paramName, paramValue) <- xgboostParams) {
|
||||
if ((paramName == "booster" && paramValue != "gbtree") ||
|
||||
(paramName == "updater" && paramValue != "grow_colmaker,prune")) {
|
||||
(paramName == "updater" && paramValue != "grow_histmaker,prune")) {
|
||||
throw new IllegalArgumentException(s"you specified $paramName as $paramValue," +
|
||||
s" XGBoost-Spark only supports gbtree as booster type" +
|
||||
" and grow_colmaker,prune as the updater type")
|
||||
" and grow_histmaker,prune as the updater type")
|
||||
}
|
||||
val name = CaseFormat.LOWER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, paramName)
|
||||
params.find(_.name == name) match {
|
||||
|
||||
@@ -33,6 +33,18 @@ private[spark] trait LearningTaskParams extends Params {
|
||||
|
||||
final def getObjective: String = $(objective)
|
||||
|
||||
/**
|
||||
* The learning objective type of the specified custom objective and eval.
|
||||
* Corresponding type will be assigned if custom objective is defined
|
||||
* options: regression, classification. default: null
|
||||
*/
|
||||
final val objectiveType = new Param[String](this, "objectiveType", "objective type used for " +
|
||||
s"training, options: {${LearningTaskParams.supportedObjectiveType.mkString(",")}",
|
||||
(value: String) => LearningTaskParams.supportedObjectiveType.contains(value))
|
||||
|
||||
final def getObjectiveType: String = $(objectiveType)
|
||||
|
||||
|
||||
/**
|
||||
* the initial prediction score of all instances, global bias. default=0.5
|
||||
*/
|
||||
@@ -75,6 +87,13 @@ private[spark] trait LearningTaskParams extends Params {
|
||||
|
||||
final def getNumEarlyStoppingRounds: Int = $(numEarlyStoppingRounds)
|
||||
|
||||
|
||||
final val maximizeEvaluationMetrics = new BooleanParam(this, "maximizeEvaluationMetrics",
|
||||
"define the expected optimization to the evaluation metrics, true to maximize otherwise" +
|
||||
" minimize it")
|
||||
|
||||
final def getMaximizeEvaluationMetrics: Boolean = $(maximizeEvaluationMetrics)
|
||||
|
||||
setDefault(objective -> "reg:linear", baseScore -> 0.5,
|
||||
trainTestRatio -> 1.0, numEarlyStoppingRounds -> 0)
|
||||
}
|
||||
@@ -82,7 +101,9 @@ private[spark] trait LearningTaskParams extends Params {
|
||||
private[spark] object LearningTaskParams {
|
||||
val supportedObjective = HashSet("reg:linear", "reg:logistic", "binary:logistic",
|
||||
"binary:logitraw", "count:poisson", "multi:softmax", "multi:softprob", "rank:pairwise",
|
||||
"reg:gamma", "reg:tweedie")
|
||||
"rank:ndcg", "rank:map", "reg:gamma", "reg:tweedie")
|
||||
|
||||
val supportedObjectiveType = HashSet("regression", "classification")
|
||||
|
||||
val supportedEvalMetrics = HashSet("rmse", "mae", "logloss", "error", "merror", "mlogloss",
|
||||
"auc", "aucpr", "ndcg", "map", "gamma-deviance")
|
||||
|
||||
@@ -19,13 +19,14 @@ package org.apache.spark
|
||||
import java.net.URL
|
||||
|
||||
import org.apache.commons.logging.LogFactory
|
||||
|
||||
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
|
||||
import org.codehaus.jackson.map.ObjectMapper
|
||||
|
||||
import scala.collection.JavaConverters._
|
||||
import scala.concurrent.ExecutionContext.Implicits.global
|
||||
import scala.concurrent.duration._
|
||||
import scala.concurrent.{Await, Future, TimeoutException}
|
||||
import scala.util.control.ControlThrowable
|
||||
|
||||
/**
|
||||
* A tracker that ensures enough number of executor cores are alive.
|
||||
@@ -111,11 +112,15 @@ class SparkParallelismTracker(
|
||||
}
|
||||
}
|
||||
|
||||
private class ErrorInXGBoostTraining(msg: String) extends ControlThrowable {
|
||||
override def toString: String = s"ErrorInXGBoostTraining: $msg"
|
||||
}
|
||||
|
||||
private[spark] class TaskFailedListener extends SparkListener {
|
||||
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
|
||||
taskEnd.reason match {
|
||||
case reason: TaskFailedReason =>
|
||||
throw new InterruptedException(s"ExecutorLost during XGBoost Training: " +
|
||||
throw new ErrorInXGBoostTraining(s"ExecutorLost during XGBoost Training: " +
|
||||
s"${reason.toErrorString}")
|
||||
case _ =>
|
||||
}
|
||||
|
||||
@@ -133,8 +133,7 @@ class PersistenceSuite extends FunSuite with PerTest with BeforeAndAfterAll {
|
||||
.setOutputCol("features")
|
||||
|
||||
val paramMap = Map("eta" -> "0.1", "max_depth" -> "6", "silent" -> "1",
|
||||
"objective" -> "binary:logistic", "num_round" -> "10", "num_workers" -> numWorkers,
|
||||
"tracker_conf" -> TrackerConf(60 * 60 * 1000, "scala"))
|
||||
"objective" -> "binary:logistic", "num_round" -> "10", "num_workers" -> numWorkers)
|
||||
val xgb = new XGBoostClassifier(paramMap)
|
||||
|
||||
// Construct MLlib pipeline, save and load
|
||||
|
||||
@@ -140,15 +140,18 @@ class XGBoostClassifierSuite extends FunSuite with PerTest {
|
||||
}
|
||||
|
||||
test("XGBoost and Spark parameters synchronize correctly") {
|
||||
val xgbParamMap = Map("eta" -> "1", "objective" -> "binary:logistic")
|
||||
val xgbParamMap = Map("eta" -> "1", "objective" -> "binary:logistic",
|
||||
"objective_type" -> "classification")
|
||||
// from xgboost params to spark params
|
||||
val xgb = new XGBoostClassifier(xgbParamMap)
|
||||
assert(xgb.getEta === 1.0)
|
||||
assert(xgb.getObjective === "binary:logistic")
|
||||
assert(xgb.getObjectiveType === "classification")
|
||||
// from spark to xgboost params
|
||||
val xgbCopy = xgb.copy(ParamMap.empty)
|
||||
assert(xgbCopy.MLlib2XGBoostParams("eta").toString.toDouble === 1.0)
|
||||
assert(xgbCopy.MLlib2XGBoostParams("objective").toString === "binary:logistic")
|
||||
assert(xgbCopy.MLlib2XGBoostParams("objective_type").toString === "classification")
|
||||
val xgbCopy2 = xgb.copy(ParamMap.empty.put(xgb.evalMetric, "logloss"))
|
||||
assert(xgbCopy2.MLlib2XGBoostParams("eval_metric").toString === "logloss")
|
||||
}
|
||||
@@ -170,7 +173,7 @@ class XGBoostClassifierSuite extends FunSuite with PerTest {
|
||||
val training2 = training1.withColumn("margin", functions.rand())
|
||||
val test = buildDataFrame(Classification.test)
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
"objective" -> "binary:logistic", "test_train_split" -> "0.5",
|
||||
"objective" -> "binary:logistic", "train_test_ratio" -> "1.0",
|
||||
"num_round" -> 5, "num_workers" -> numWorkers)
|
||||
|
||||
val xgb = new XGBoostClassifier(paramMap)
|
||||
|
||||
@@ -47,4 +47,34 @@ class XGBoostConfigureSuite extends FunSuite with PerTest {
|
||||
val eval = new EvalError()
|
||||
assert(eval.eval(model._booster.predict(testDM, outPutMargin = true), testDM) < 0.1)
|
||||
}
|
||||
|
||||
test("Check for Spark encryption over-the-wire") {
|
||||
val originalSslConfOpt = ss.conf.getOption("spark.ssl.enabled")
|
||||
ss.conf.set("spark.ssl.enabled", true)
|
||||
|
||||
val paramMap = Map("eta" -> "1", "max_depth" -> "2", "silent" -> "1",
|
||||
"objective" -> "binary:logistic", "num_round" -> 2, "num_workers" -> numWorkers)
|
||||
val training = buildDataFrame(Classification.train)
|
||||
|
||||
withClue("xgboost-spark should throw an exception when spark.ssl.enabled = true but " +
|
||||
"xgboost.spark.ignoreSsl != true") {
|
||||
val thrown = intercept[Exception] {
|
||||
new XGBoostClassifier(paramMap).fit(training)
|
||||
}
|
||||
assert(thrown.getMessage.contains("xgboost.spark.ignoreSsl") &&
|
||||
thrown.getMessage.contains("spark.ssl.enabled"))
|
||||
}
|
||||
|
||||
// Confirm that this check can be overridden.
|
||||
ss.conf.set("xgboost.spark.ignoreSsl", true)
|
||||
new XGBoostClassifier(paramMap).fit(training)
|
||||
|
||||
originalSslConfOpt match {
|
||||
case None =>
|
||||
ss.conf.unset("spark.ssl.enabled")
|
||||
case Some(originalSslConf) =>
|
||||
ss.conf.set("spark.ssl.enabled", originalSslConf)
|
||||
}
|
||||
ss.conf.unset("xgboost.spark.ignoreSsl")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,10 +19,12 @@ package ml.dmlc.xgboost4j.scala.spark
|
||||
import java.nio.file.Files
|
||||
import java.util.concurrent.LinkedBlockingDeque
|
||||
import ml.dmlc.xgboost4j.java.Rabit
|
||||
import ml.dmlc.xgboost4j.{LabeledPoint => XGBLabeledPoint}
|
||||
import ml.dmlc.xgboost4j.scala.DMatrix
|
||||
import ml.dmlc.xgboost4j.scala.rabit.RabitTracker
|
||||
import ml.dmlc.xgboost4j.scala.{XGBoost => SXGBoost, _}
|
||||
import org.apache.hadoop.fs.{FileSystem, Path}
|
||||
import org.apache.spark.TaskContext
|
||||
import org.apache.spark.ml.linalg.Vectors
|
||||
import org.apache.spark.sql._
|
||||
import org.scalatest.FunSuite
|
||||
@@ -71,18 +73,16 @@ class XGBoostGeneralSuite extends FunSuite with PerTest {
|
||||
assert(collectedAllReduceResults.poll().sameElements(maxVec))
|
||||
}
|
||||
|
||||
test("build RDD containing boosters with the specified worker number") {
|
||||
test("distributed training with the specified worker number") {
|
||||
val trainingRDD = sc.parallelize(Classification.train)
|
||||
val partitionedRDD = XGBoost.repartitionForTraining(trainingRDD, 2)
|
||||
val boosterRDD = XGBoost.buildDistributedBoosters(
|
||||
partitionedRDD,
|
||||
val (booster, metrics) = XGBoost.trainDistributed(
|
||||
trainingRDD,
|
||||
List("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
"objective" -> "binary:logistic").toMap,
|
||||
new java.util.HashMap[String, String](),
|
||||
round = 5, eval = null, obj = null, useExternalMemory = true,
|
||||
missing = Float.NaN, prevBooster = null)
|
||||
val boosterCount = boosterRDD.count()
|
||||
assert(boosterCount === 2)
|
||||
round = 5, nWorkers = numWorkers, eval = null, obj = null, useExternalMemory = false,
|
||||
hasGroup = false, missing = Float.NaN)
|
||||
|
||||
assert(booster != null)
|
||||
}
|
||||
|
||||
test("training with external memory cache") {
|
||||
@@ -235,4 +235,45 @@ class XGBoostGeneralSuite extends FunSuite with PerTest {
|
||||
assert(error(prevModel._booster) > error(nextModel._booster))
|
||||
assert(error(nextModel._booster) < 0.1)
|
||||
}
|
||||
|
||||
test("repartitionForTrainingGroup with group data") {
|
||||
// test different splits to cover the corner cases.
|
||||
for (split <- 1 to 20) {
|
||||
val trainingRDD = sc.parallelize(Ranking.train, split)
|
||||
val traingGroupsRDD = XGBoost.repartitionForTrainingGroup(trainingRDD, 4)
|
||||
val trainingGroups: Array[Array[XGBLabeledPoint]] = traingGroupsRDD.collect()
|
||||
// check the the order of the groups with group id.
|
||||
// Ranking.train has 20 groups
|
||||
assert(trainingGroups.length == 20)
|
||||
|
||||
// compare all points
|
||||
val allPoints = trainingGroups.sortBy(_(0).group).flatten
|
||||
assert(allPoints.length == Ranking.train.size)
|
||||
for (i <- 0 to Ranking.train.size - 1) {
|
||||
assert(allPoints(i).group == Ranking.train(i).group)
|
||||
assert(allPoints(i).label == Ranking.train(i).label)
|
||||
assert(allPoints(i).values.sameElements(Ranking.train(i).values))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
test("repartitionForTrainingGroup with group data which has empty partition") {
|
||||
val trainingRDD = sc.parallelize(Ranking.train, 5).mapPartitions(it => {
|
||||
// make one partition empty for testing
|
||||
it.filter(_ => TaskContext.getPartitionId() != 3)
|
||||
})
|
||||
XGBoost.repartitionForTrainingGroup(trainingRDD, 4)
|
||||
}
|
||||
|
||||
test("distributed training with group data") {
|
||||
val trainingRDD = sc.parallelize(Ranking.train, 5)
|
||||
val (booster, metrics) = XGBoost.trainDistributed(
|
||||
trainingRDD,
|
||||
List("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
|
||||
"objective" -> "binary:logistic").toMap,
|
||||
round = 5, nWorkers = numWorkers, eval = null, obj = null, useExternalMemory = false,
|
||||
hasGroup = true, missing = Float.NaN)
|
||||
|
||||
assert(booster != null)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,10 +6,10 @@
|
||||
<parent>
|
||||
<groupId>ml.dmlc</groupId>
|
||||
<artifactId>xgboost-jvm</artifactId>
|
||||
<version>0.80</version>
|
||||
<version>0.81</version>
|
||||
</parent>
|
||||
<artifactId>xgboost4j</artifactId>
|
||||
<version>0.80</version>
|
||||
<version>0.81</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<dependencies>
|
||||
|
||||
@@ -369,15 +369,68 @@ public class Booster implements Serializable, KryoSerializable {
|
||||
return modelInfos[0];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the dump of the model as a string array with specified feature names.
|
||||
*
|
||||
* @param featureNames Names of the features.
|
||||
* @return dumped model information
|
||||
* @throws XGBoostError
|
||||
*/
|
||||
public String[] getModelDump(String[] featureNames, boolean withStats) throws XGBoostError {
|
||||
return getModelDump(featureNames, withStats, "text");
|
||||
}
|
||||
|
||||
public String[] getModelDump(String[] featureNames, boolean withStats, String format)
|
||||
throws XGBoostError {
|
||||
int statsFlag = 0;
|
||||
if (withStats) {
|
||||
statsFlag = 1;
|
||||
}
|
||||
if (format == null) {
|
||||
format = "text";
|
||||
}
|
||||
String[][] modelInfos = new String[1][];
|
||||
XGBoostJNI.checkCall(XGBoostJNI.XGBoosterDumpModelExWithFeatures(
|
||||
handle, featureNames, statsFlag, format, modelInfos));
|
||||
return modelInfos[0];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get importance of each feature with specified feature names.
|
||||
*
|
||||
* @return featureScoreMap key: feature name, value: feature importance score, can be nill.
|
||||
* @throws XGBoostError native error
|
||||
*/
|
||||
public Map<String, Integer> getFeatureScore(String[] featureNames) throws XGBoostError {
|
||||
String[] modelInfos = getModelDump(featureNames, false);
|
||||
Map<String, Integer> featureScore = new HashMap<>();
|
||||
for (String tree : modelInfos) {
|
||||
for (String node : tree.split("\n")) {
|
||||
String[] array = node.split("\\[");
|
||||
if (array.length == 1) {
|
||||
continue;
|
||||
}
|
||||
String fid = array[1].split("\\]")[0];
|
||||
fid = fid.split("<")[0];
|
||||
if (featureScore.containsKey(fid)) {
|
||||
featureScore.put(fid, 1 + featureScore.get(fid));
|
||||
} else {
|
||||
featureScore.put(fid, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
return featureScore;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get importance of each feature
|
||||
*
|
||||
* @return featureMap key: feature index, value: feature importance score, can be nill
|
||||
* @return featureScoreMap key: feature index, value: feature importance score, can be nill
|
||||
* @throws XGBoostError native error
|
||||
*/
|
||||
public Map<String, Integer> getFeatureScore(String featureMap) throws XGBoostError {
|
||||
String[] modelInfos = getModelDump(featureMap, false);
|
||||
Map<String, Integer> featureScore = new HashMap<String, Integer>();
|
||||
Map<String, Integer> featureScore = new HashMap<>();
|
||||
for (String tree : modelInfos) {
|
||||
for (String node : tree.split("\n")) {
|
||||
String[] array = node.split("\\[");
|
||||
|
||||
@@ -118,9 +118,9 @@ public class XGBoost {
|
||||
* performance on the validation set.
|
||||
* @param metrics array containing the evaluation metrics for each matrix in watches for each
|
||||
* iteration
|
||||
* @param earlyStoppingRound if non-zero, training would be stopped
|
||||
* @param earlyStoppingRounds if non-zero, training would be stopped
|
||||
* after a specified number of consecutive
|
||||
* increases in any evaluation metric.
|
||||
* goes to the unexpected direction in any evaluation metric.
|
||||
* @param obj customized objective
|
||||
* @param eval customized evaluation
|
||||
* @param booster train from scratch if set to null; train from an existing booster if not null.
|
||||
@@ -134,7 +134,7 @@ public class XGBoost {
|
||||
float[][] metrics,
|
||||
IObjective obj,
|
||||
IEvaluation eval,
|
||||
int earlyStoppingRound,
|
||||
int earlyStoppingRounds,
|
||||
Booster booster) throws XGBoostError {
|
||||
|
||||
//collect eval matrixs
|
||||
@@ -196,17 +196,14 @@ public class XGBoost {
|
||||
for (int i = 0; i < metricsOut.length; i++) {
|
||||
metrics[i][iter] = metricsOut[i];
|
||||
}
|
||||
|
||||
boolean decreasing = true;
|
||||
float[] criterion = metrics[metrics.length - 1];
|
||||
for (int shift = 0; shift < Math.min(iter, earlyStoppingRound) - 1; shift++) {
|
||||
decreasing &= criterion[iter - shift] <= criterion[iter - shift - 1];
|
||||
}
|
||||
|
||||
if (!decreasing) {
|
||||
Rabit.trackerPrint(String.format(
|
||||
"early stopping after %d decreasing rounds", earlyStoppingRound));
|
||||
break;
|
||||
if (earlyStoppingRounds > 0) {
|
||||
boolean onTrack = judgeIfTrainingOnTrack(params, earlyStoppingRounds, metrics, iter);
|
||||
if (!onTrack) {
|
||||
String reversedDirection = getReversedDirection(params);
|
||||
Rabit.trackerPrint(String.format(
|
||||
"early stopping after %d %s rounds", earlyStoppingRounds, reversedDirection));
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (Rabit.getRank() == 0) {
|
||||
Rabit.trackerPrint(evalInfo + '\n');
|
||||
@@ -217,6 +214,41 @@ public class XGBoost {
|
||||
return booster;
|
||||
}
|
||||
|
||||
static boolean judgeIfTrainingOnTrack(
|
||||
Map<String, Object> params, int earlyStoppingRounds, float[][] metrics, int iter) {
|
||||
boolean maximizeEvaluationMetrics = getMetricsExpectedDirection(params);
|
||||
boolean onTrack = false;
|
||||
float[] criterion = metrics[metrics.length - 1];
|
||||
for (int shift = 0; shift < Math.min(iter, earlyStoppingRounds) - 1; shift++) {
|
||||
onTrack |= maximizeEvaluationMetrics ?
|
||||
criterion[iter - shift] >= criterion[iter - shift - 1] :
|
||||
criterion[iter - shift] <= criterion[iter - shift - 1];
|
||||
}
|
||||
return onTrack;
|
||||
}
|
||||
|
||||
private static String getReversedDirection(Map<String, Object> params) {
|
||||
String reversedDirection = null;
|
||||
if (Boolean.valueOf(String.valueOf(params.get("maximize_evaluation_metrics")))) {
|
||||
reversedDirection = "descending";
|
||||
} else if (!Boolean.valueOf(String.valueOf(params.get("maximize_evaluation_metrics")))) {
|
||||
reversedDirection = "ascending";
|
||||
}
|
||||
return reversedDirection;
|
||||
}
|
||||
|
||||
private static boolean getMetricsExpectedDirection(Map<String, Object> params) {
|
||||
try {
|
||||
String maximize = String.valueOf(params.get("maximize_evaluation_metrics"));
|
||||
assert(maximize != null);
|
||||
return Boolean.valueOf(maximize);
|
||||
} catch (Exception ex) {
|
||||
logger.error("maximize_evaluation_metrics has to be specified for enabling early stop," +
|
||||
" allowed value: true/false", ex);
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Cross-validation with given parameters.
|
||||
*
|
||||
|
||||
@@ -111,6 +111,9 @@ class XGBoostJNI {
|
||||
public final static native int XGBoosterDumpModelEx(long handle, String fmap, int with_stats,
|
||||
String format, String[][] out_strings);
|
||||
|
||||
public final static native int XGBoosterDumpModelExWithFeatures(
|
||||
long handle, String[] feature_names, int with_stats, String format, String[][] out_strings);
|
||||
|
||||
public final static native int XGBoosterGetAttr(long handle, String key, String[] out_string);
|
||||
public final static native int XGBoosterSetAttr(long handle, String key, String value);
|
||||
public final static native int XGBoosterLoadRabitCheckpoint(long handle, int[] out_version);
|
||||
|
||||
@@ -187,16 +187,42 @@ class Booster private[xgboost4j](private[xgboost4j] var booster: JBooster)
|
||||
booster.getModelDump(featureMap, withStats, format)
|
||||
}
|
||||
|
||||
/**
|
||||
* Dump model as Array of string with specified feature names.
|
||||
*
|
||||
* @param featureNames Names of features.
|
||||
*/
|
||||
@throws(classOf[XGBoostError])
|
||||
def getModelDump(featureNames: Array[String]): Array[String] = {
|
||||
booster.getModelDump(featureNames, false, "text")
|
||||
}
|
||||
|
||||
def getModelDump(featureNames: Array[String], withStats: Boolean, format: String)
|
||||
: Array[String] = {
|
||||
booster.getModelDump(featureNames, withStats, format)
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get importance of each feature
|
||||
*
|
||||
* @return featureMap key: feature index, value: feature importance score
|
||||
* @return featureScoreMap key: feature index, value: feature importance score
|
||||
*/
|
||||
@throws(classOf[XGBoostError])
|
||||
def getFeatureScore(featureMap: String = null): mutable.Map[String, Integer] = {
|
||||
booster.getFeatureScore(featureMap).asScala
|
||||
}
|
||||
|
||||
/**
|
||||
* Get importance of each feature with specified feature names.
|
||||
*
|
||||
* @return featureScoreMap key: feature name, value: feature importance score
|
||||
*/
|
||||
@throws(classOf[XGBoostError])
|
||||
def getFeatureScore(featureNames: Array[String]): mutable.Map[String, Integer] = {
|
||||
booster.getFeatureScore(featureNames).asScala
|
||||
}
|
||||
|
||||
def getVersion: Int = booster.getVersion
|
||||
|
||||
def toByteArray: Array[Byte] = {
|
||||
|
||||
@@ -20,7 +20,7 @@ import java.net.{InetAddress, InetSocketAddress}
|
||||
|
||||
import akka.actor.ActorSystem
|
||||
import akka.pattern.ask
|
||||
import ml.dmlc.xgboost4j.java.IRabitTracker
|
||||
import ml.dmlc.xgboost4j.java.{IRabitTracker, TrackerProperties}
|
||||
import ml.dmlc.xgboost4j.scala.rabit.handler.RabitTrackerHandler
|
||||
|
||||
import scala.concurrent.duration._
|
||||
@@ -93,8 +93,11 @@ private[scala] class RabitTracker(numWorkers: Int, port: Option[Int] = None,
|
||||
* @return Boolean flag indicating if the Rabit tracker starts successfully.
|
||||
*/
|
||||
private def start(timeout: Duration): Boolean = {
|
||||
val hostAddress = Option(TrackerProperties.getInstance().getHostIp)
|
||||
.map(InetAddress.getByName).getOrElse(InetAddress.getLocalHost)
|
||||
|
||||
handler ? RabitTrackerHandler.StartTracker(
|
||||
new InetSocketAddress(InetAddress.getLocalHost, port.getOrElse(0)), maxPortTrials, timeout)
|
||||
new InetSocketAddress(hostAddress, port.getOrElse(0)), maxPortTrials, timeout)
|
||||
|
||||
// block by waiting for the actor to bind to a port
|
||||
Try(Await.result(handler ? RabitTrackerHandler.RequestBoundFuture, askTimeout.duration)
|
||||
|
||||
@@ -656,6 +656,56 @@ JNIEXPORT jint JNICALL Java_ml_dmlc_xgboost4j_java_XGBoostJNI_XGBoosterDumpModel
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Class: ml_dmlc_xgboost4j_java_XGBoostJNI
|
||||
* Method: XGBoosterDumpModelExWithFeatures
|
||||
* Signature: (JLjava/lang/String;I[[Ljava/lang/String;)I
|
||||
*/
|
||||
JNIEXPORT jint JNICALL Java_ml_dmlc_xgboost4j_java_XGBoostJNI_XGBoosterDumpModelExWithFeatures
|
||||
(JNIEnv *jenv, jclass jcls, jlong jhandle, jobjectArray jfeature_names, jint jwith_stats,
|
||||
jstring jformat, jobjectArray jout) {
|
||||
|
||||
BoosterHandle handle = (BoosterHandle) jhandle;
|
||||
bst_ulong feature_num = (bst_ulong)jenv->GetArrayLength(jfeature_names);
|
||||
|
||||
std::vector<std::string> feature_names;
|
||||
std::vector<char*> feature_names_char;
|
||||
|
||||
std::string feature_type_q = "q";
|
||||
std::vector<char*> feature_types_char;
|
||||
|
||||
for (bst_ulong i = 0; i < feature_num; ++i) {
|
||||
jstring jfeature_name = (jstring)jenv->GetObjectArrayElement(jfeature_names, i);
|
||||
const char *s = jenv->GetStringUTFChars(jfeature_name, 0);
|
||||
feature_names.push_back(std::string(s, jenv->GetStringLength(jfeature_name)));
|
||||
if (s != nullptr) jenv->ReleaseStringUTFChars(jfeature_name, s);
|
||||
if (feature_names.back().length() == 0) feature_names.pop_back();
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < feature_names.size(); ++i) {
|
||||
feature_names_char.push_back(&feature_names[i][0]);
|
||||
feature_types_char.push_back(&feature_type_q[0]);
|
||||
}
|
||||
|
||||
const char *format = jenv->GetStringUTFChars(jformat, 0);
|
||||
bst_ulong len = 0;
|
||||
char **result;
|
||||
|
||||
int ret = XGBoosterDumpModelExWithFeatures(handle, feature_num,
|
||||
(const char **) dmlc::BeginPtr(feature_names_char),
|
||||
(const char **) dmlc::BeginPtr(feature_types_char),
|
||||
jwith_stats, format, &len, (const char ***) &result);
|
||||
|
||||
jsize jlen = (jsize) len;
|
||||
jobjectArray jinfos = jenv->NewObjectArray(jlen, jenv->FindClass("java/lang/String"), jenv->NewStringUTF(""));
|
||||
for(int i=0 ; i<jlen; i++) {
|
||||
jenv->SetObjectArrayElement(jinfos, i, jenv->NewStringUTF((const char*) result[i]));
|
||||
}
|
||||
jenv->SetObjectArrayElement(jout, 0, jinfos);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Class: ml_dmlc_xgboost4j_java_XGBoostJNI
|
||||
* Method: XGBoosterLoadRabitCheckpoint
|
||||
|
||||
@@ -223,6 +223,14 @@ JNIEXPORT jint JNICALL Java_ml_dmlc_xgboost4j_java_XGBoostJNI_XGBoosterGetModelR
|
||||
JNIEXPORT jint JNICALL Java_ml_dmlc_xgboost4j_java_XGBoostJNI_XGBoosterDumpModelEx
|
||||
(JNIEnv *, jclass, jlong, jstring, jint, jstring, jobjectArray);
|
||||
|
||||
/*
|
||||
* Class: ml_dmlc_xgboost4j_java_XGBoostJNI
|
||||
* Method: XGBoosterDumpModelExWithFeatures
|
||||
* Signature: (JLjava/lang/String;I[[Ljava/lang/String;)I
|
||||
*/
|
||||
JNIEXPORT jint JNICALL Java_ml_dmlc_xgboost4j_java_XGBoostJNI_XGBoosterDumpModelExWithFeatures
|
||||
(JNIEnv *, jclass, jlong, jobjectArray, jint, jstring, jobjectArray);
|
||||
|
||||
/*
|
||||
* Class: ml_dmlc_xgboost4j_java_XGBoostJNI
|
||||
* Method: XGBoosterGetAttr
|
||||
|
||||
@@ -152,6 +152,66 @@ public class BoosterImplTest {
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDescendMetrics() {
|
||||
Map<String, Object> paramMap = new HashMap<String, Object>() {
|
||||
{
|
||||
put("max_depth", 3);
|
||||
put("silent", 1);
|
||||
put("objective", "binary:logistic");
|
||||
put("maximize_evaluation_metrics", "false");
|
||||
}
|
||||
};
|
||||
float[][] metrics = new float[1][5];
|
||||
for (int i = 0; i < 5; i++) {
|
||||
metrics[0][i] = i;
|
||||
}
|
||||
boolean onTrack = XGBoost.judgeIfTrainingOnTrack(paramMap, 5, metrics, 4);
|
||||
TestCase.assertFalse(onTrack);
|
||||
for (int i = 0; i < 5; i++) {
|
||||
metrics[0][i] = 5 - i;
|
||||
}
|
||||
onTrack = XGBoost.judgeIfTrainingOnTrack(paramMap, 5, metrics, 4);
|
||||
TestCase.assertTrue(onTrack);
|
||||
for (int i = 0; i < 5; i++) {
|
||||
metrics[0][i] = 5 - i;
|
||||
}
|
||||
metrics[0][0] = 1;
|
||||
metrics[0][2] = 5;
|
||||
onTrack = XGBoost.judgeIfTrainingOnTrack(paramMap, 5, metrics, 4);
|
||||
TestCase.assertTrue(onTrack);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAscendMetrics() {
|
||||
Map<String, Object> paramMap = new HashMap<String, Object>() {
|
||||
{
|
||||
put("max_depth", 3);
|
||||
put("silent", 1);
|
||||
put("objective", "binary:logistic");
|
||||
put("maximize_evaluation_metrics", "true");
|
||||
}
|
||||
};
|
||||
float[][] metrics = new float[1][5];
|
||||
for (int i = 0; i < 5; i++) {
|
||||
metrics[0][i] = i;
|
||||
}
|
||||
boolean onTrack = XGBoost.judgeIfTrainingOnTrack(paramMap, 5, metrics, 4);
|
||||
TestCase.assertTrue(onTrack);
|
||||
for (int i = 0; i < 5; i++) {
|
||||
metrics[0][i] = 5 - i;
|
||||
}
|
||||
onTrack = XGBoost.judgeIfTrainingOnTrack(paramMap, 5, metrics, 4);
|
||||
TestCase.assertFalse(onTrack);
|
||||
for (int i = 0; i < 5; i++) {
|
||||
metrics[0][i] = i;
|
||||
}
|
||||
metrics[0][0] = 6;
|
||||
metrics[0][2] = 1;
|
||||
onTrack = XGBoost.judgeIfTrainingOnTrack(paramMap, 5, metrics, 4);
|
||||
TestCase.assertTrue(onTrack);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBoosterEarlyStop() throws XGBoostError, IOException {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
@@ -162,6 +222,7 @@ public class BoosterImplTest {
|
||||
put("max_depth", 3);
|
||||
put("silent", 1);
|
||||
put("objective", "binary:logistic");
|
||||
put("maximize_evaluation_metrics", "false");
|
||||
}
|
||||
};
|
||||
Map<String, DMatrix> watches = new LinkedHashMap<>();
|
||||
@@ -271,6 +332,24 @@ public class BoosterImplTest {
|
||||
Booster booster = trainBooster(trainMat, testMat);
|
||||
String[] dump = booster.getModelDump("", false, "json");
|
||||
TestCase.assertEquals(" { \"nodeid\":", dump[0].substring(0, 13));
|
||||
|
||||
// test with specified feature names
|
||||
String[] featureNames = new String[126];
|
||||
for(int i = 0; i < 126; i++) featureNames[i] = "test_feature_name_" + i;
|
||||
dump = booster.getModelDump(featureNames, false, "json");
|
||||
TestCase.assertTrue(dump[0].contains("test_feature_name_"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetFeatureImportance() throws XGBoostError {
|
||||
DMatrix trainMat = new DMatrix("../../demo/data/agaricus.txt.train");
|
||||
DMatrix testMat = new DMatrix("../../demo/data/agaricus.txt.test");
|
||||
|
||||
Booster booster = trainBooster(trainMat, testMat);
|
||||
String[] featureNames = new String[126];
|
||||
for(int i = 0; i < 126; i++) featureNames[i] = "test_feature_name_" + i;
|
||||
Map<String, Integer> scoreMap = booster.getFeatureScore(featureNames);
|
||||
for (String fName: scoreMap.keySet()) TestCase.assertTrue(fName.startsWith("test_feature_name_"));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
||||
@@ -33,21 +33,22 @@ class MyLogistic : public ObjFunction {
|
||||
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
|
||||
param_.InitAllowUnknown(args);
|
||||
}
|
||||
void GetGradient(HostDeviceVector<bst_float> *preds,
|
||||
void GetGradient(const HostDeviceVector<bst_float> &preds,
|
||||
const MetaInfo &info,
|
||||
int iter,
|
||||
HostDeviceVector<GradientPair> *out_gpair) override {
|
||||
out_gpair->Resize(preds->Size());
|
||||
std::vector<bst_float>& preds_h = preds->HostVector();
|
||||
out_gpair->Resize(preds.Size());
|
||||
const std::vector<bst_float>& preds_h = preds.HostVector();
|
||||
std::vector<GradientPair>& out_gpair_h = out_gpair->HostVector();
|
||||
const std::vector<bst_float>& labels_h = info.labels_.HostVector();
|
||||
for (size_t i = 0; i < preds_h.size(); ++i) {
|
||||
bst_float w = info.GetWeight(i);
|
||||
// scale the negative examples!
|
||||
if (info.labels_[i] == 0.0f) w *= param_.scale_neg_weight;
|
||||
if (labels_h[i] == 0.0f) w *= param_.scale_neg_weight;
|
||||
// logistic transformation
|
||||
bst_float p = 1.0f / (1.0f + std::exp(-preds_h[i]));
|
||||
// this is the gradient
|
||||
bst_float grad = (p - info.labels_[i]) * w;
|
||||
bst_float grad = (p - labels_h[i]) * w;
|
||||
// this is the second order gradient
|
||||
bst_float hess = p * (1.0f - p) * w;
|
||||
out_gpair_h.at(i) = GradientPair(grad, hess);
|
||||
|
||||
@@ -177,15 +177,17 @@ class SparsePageLZ4Format : public SparsePageFormat {
|
||||
}
|
||||
|
||||
bool Read(SparsePage* page, dmlc::SeekStream* fi) override {
|
||||
if (!fi->Read(&(page->offset))) return false;
|
||||
CHECK_NE(page->offset.size(), 0) << "Invalid SparsePage file";
|
||||
auto& offset_vec = page->offset.HostVector();
|
||||
auto& data_vec = page->data.HostVector();
|
||||
if (!fi->Read(&(offset_vec))) return false;
|
||||
CHECK_NE(offset_vec.size(), 0) << "Invalid SparsePage file";
|
||||
this->LoadIndexValue(fi);
|
||||
|
||||
page->data.resize(page->offset.back());
|
||||
data_vec.resize(offset_vec.back());
|
||||
CHECK_EQ(index_.data.size(), value_.data.size());
|
||||
CHECK_EQ(index_.data.size(), page->data.size());
|
||||
for (size_t i = 0; i < page->data.size(); ++i) {
|
||||
page->data[i] = Entry(index_.data[i] + min_index_, value_.data[i]);
|
||||
CHECK_EQ(index_.data.size(), data_vec.size());
|
||||
for (size_t i = 0; i < data_vec.size(); ++i) {
|
||||
data_vec[i] = Entry(index_.data[i] + min_index_, value_.data[i]);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@@ -195,24 +197,25 @@ class SparsePageLZ4Format : public SparsePageFormat {
|
||||
const std::vector<bst_uint>& sorted_index_set) override {
|
||||
if (!fi->Read(&disk_offset_)) return false;
|
||||
this->LoadIndexValue(fi);
|
||||
|
||||
page->offset.clear();
|
||||
page->offset.push_back(0);
|
||||
auto& offset_vec = page->offset.HostVector();
|
||||
auto& data_vec = page->data.HostVector();
|
||||
offset_vec.clear();
|
||||
offset_vec.push_back(0);
|
||||
for (bst_uint cid : sorted_index_set) {
|
||||
page->offset.push_back(
|
||||
page->offset.back() + disk_offset_[cid + 1] - disk_offset_[cid]);
|
||||
offset_vec.push_back(
|
||||
offset_vec.back() + disk_offset_[cid + 1] - disk_offset_[cid]);
|
||||
}
|
||||
page->data.resize(page->offset.back());
|
||||
data_vec.resize(offset_vec.back());
|
||||
CHECK_EQ(index_.data.size(), value_.data.size());
|
||||
CHECK_EQ(index_.data.size(), disk_offset_.back());
|
||||
|
||||
for (size_t i = 0; i < sorted_index_set.size(); ++i) {
|
||||
bst_uint cid = sorted_index_set[i];
|
||||
size_t dst_begin = page->offset[i];
|
||||
size_t dst_begin = offset_vec[i];
|
||||
size_t src_begin = disk_offset_[cid];
|
||||
size_t num = disk_offset_[cid + 1] - disk_offset_[cid];
|
||||
for (size_t j = 0; j < num; ++j) {
|
||||
page->data[dst_begin + j] = Entry(
|
||||
data_vec[dst_begin + j] = Entry(
|
||||
index_.data[src_begin + j] + min_index_, value_.data[src_begin + j]);
|
||||
}
|
||||
}
|
||||
@@ -220,22 +223,24 @@ class SparsePageLZ4Format : public SparsePageFormat {
|
||||
}
|
||||
|
||||
void Write(const SparsePage& page, dmlc::Stream* fo) override {
|
||||
CHECK(page.offset.size() != 0 && page.offset[0] == 0);
|
||||
CHECK_EQ(page.offset.back(), page.data.size());
|
||||
fo->Write(page.offset);
|
||||
const auto& offset_vec = page.offset.HostVector();
|
||||
const auto& data_vec = page.data.HostVector();
|
||||
CHECK(offset_vec.size() != 0 && offset_vec[0] == 0);
|
||||
CHECK_EQ(offset_vec.back(), data_vec.size());
|
||||
fo->Write(offset_vec);
|
||||
min_index_ = page.base_rowid;
|
||||
fo->Write(&min_index_, sizeof(min_index_));
|
||||
index_.data.resize(page.data.size());
|
||||
value_.data.resize(page.data.size());
|
||||
index_.data.resize(data_vec.size());
|
||||
value_.data.resize(data_vec.size());
|
||||
|
||||
for (size_t i = 0; i < page.data.size(); ++i) {
|
||||
bst_uint idx = page.data[i].index - min_index_;
|
||||
for (size_t i = 0; i < data_vec.size(); ++i) {
|
||||
bst_uint idx = data_vec[i].index - min_index_;
|
||||
CHECK_LE(idx, static_cast<bst_uint>(std::numeric_limits<StorageIndex>::max()))
|
||||
<< "The storage index is chosen to limited to smaller equal than "
|
||||
<< std::numeric_limits<StorageIndex>::max()
|
||||
<< "min_index=" << min_index_;
|
||||
index_.data[i] = static_cast<StorageIndex>(idx);
|
||||
value_.data[i] = page.data[i].fvalue;
|
||||
value_.data[i] = data_vec[i].fvalue;
|
||||
}
|
||||
|
||||
index_.InitCompressChunks(kChunkSize, kMaxChunk);
|
||||
@@ -259,7 +264,7 @@ class SparsePageLZ4Format : public SparsePageFormat {
|
||||
raw_bytes_value_ += value_.RawBytes();
|
||||
encoded_bytes_index_ += index_.EncodedBytes();
|
||||
encoded_bytes_value_ += value_.EncodedBytes();
|
||||
raw_bytes_ += page.offset.size() * sizeof(size_t);
|
||||
raw_bytes_ += offset_vec.size() * sizeof(size_t);
|
||||
}
|
||||
|
||||
inline void LoadIndexValue(dmlc::SeekStream* fi) {
|
||||
|
||||
@@ -1 +1 @@
|
||||
0.80
|
||||
0.81
|
||||
|
||||
@@ -12,7 +12,7 @@ from .core import DMatrix, Booster
|
||||
from .training import train, cv
|
||||
from . import rabit # noqa
|
||||
try:
|
||||
from .sklearn import XGBModel, XGBClassifier, XGBRegressor
|
||||
from .sklearn import XGBModel, XGBClassifier, XGBRegressor, XGBRanker
|
||||
from .plotting import plot_importance, plot_tree, to_graphviz
|
||||
except ImportError:
|
||||
pass
|
||||
@@ -23,5 +23,5 @@ with open(VERSION_FILE) as f:
|
||||
|
||||
__all__ = ['DMatrix', 'Booster',
|
||||
'train', 'cv',
|
||||
'XGBModel', 'XGBClassifier', 'XGBRegressor',
|
||||
'XGBModel', 'XGBClassifier', 'XGBRegressor', 'XGBRanker',
|
||||
'plot_importance', 'plot_tree', 'to_graphviz']
|
||||
|
||||
@@ -32,7 +32,7 @@ def _fmt_metric(value, show_stdv=True):
|
||||
def print_evaluation(period=1, show_stdv=True):
|
||||
"""Create a callback that print evaluation result.
|
||||
|
||||
We print the evaluation results every ``period`` iterations
|
||||
We print the evaluation results every **period** iterations
|
||||
and on the first and the last iterations.
|
||||
|
||||
Parameters
|
||||
@@ -60,7 +60,7 @@ def print_evaluation(period=1, show_stdv=True):
|
||||
|
||||
|
||||
def record_evaluation(eval_result):
|
||||
"""Create a call back that records the evaluation history into eval_result.
|
||||
"""Create a call back that records the evaluation history into **eval_result**.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
@@ -109,10 +109,11 @@ def reset_learning_rate(learning_rates):
|
||||
learning_rates: list or function
|
||||
List of learning rate for each boosting round
|
||||
or a customized function that calculates eta in terms of
|
||||
current number of round and the total number of boosting round (e.g. yields
|
||||
learning rate decay)
|
||||
- list l: eta = l[boosting_round]
|
||||
- function f: eta = f(boosting_round, num_boost_round)
|
||||
current number of round and the total number of boosting round (e.g.
|
||||
yields learning rate decay)
|
||||
|
||||
* list ``l``: ``eta = l[boosting_round]``
|
||||
* function ``f``: ``eta = f(boosting_round, num_boost_round)``
|
||||
|
||||
Returns
|
||||
-------
|
||||
@@ -150,14 +151,14 @@ def early_stop(stopping_rounds, maximize=False, verbose=True):
|
||||
"""Create a callback that activates early stoppping.
|
||||
|
||||
Validation error needs to decrease at least
|
||||
every <stopping_rounds> round(s) to continue training.
|
||||
Requires at least one item in evals.
|
||||
every **stopping_rounds** round(s) to continue training.
|
||||
Requires at least one item in **evals**.
|
||||
If there's more than one, will use the last.
|
||||
Returns the model from the last iteration (not the best one).
|
||||
If early stopping occurs, the model will have three additional fields:
|
||||
bst.best_score, bst.best_iteration and bst.best_ntree_limit.
|
||||
(Use bst.best_ntree_limit to get the correct value if num_parallel_tree
|
||||
and/or num_class appears in the parameters)
|
||||
``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``.
|
||||
(Use ``bst.best_ntree_limit`` to get the correct value if ``num_parallel_tree``
|
||||
and/or ``num_class`` appears in the parameters)
|
||||
|
||||
Parameters
|
||||
----------
|
||||
@@ -190,19 +191,18 @@ def early_stop(stopping_rounds, maximize=False, verbose=True):
|
||||
maximize_metrics = ('auc', 'map', 'ndcg')
|
||||
maximize_at_n_metrics = ('auc@', 'map@', 'ndcg@')
|
||||
maximize_score = maximize
|
||||
metric = env.evaluation_result_list[-1][0]
|
||||
metric_label = env.evaluation_result_list[-1][0]
|
||||
metric = metric_label.split('-', 1)[-1]
|
||||
|
||||
if any(env.evaluation_result_list[-1][0].split('-')[-1].startswith(x)
|
||||
for x in maximize_at_n_metrics):
|
||||
if any(metric.startswith(x) for x in maximize_at_n_metrics):
|
||||
maximize_score = True
|
||||
|
||||
if any(env.evaluation_result_list[-1][0].split('-')[-1].split(":")[0] == x
|
||||
for x in maximize_metrics):
|
||||
if any(metric.split(":")[0] == x for x in maximize_metrics):
|
||||
maximize_score = True
|
||||
|
||||
if verbose and env.rank == 0:
|
||||
msg = "Will train until {} hasn't improved in {} rounds.\n"
|
||||
rabit.tracker_print(msg.format(metric, stopping_rounds))
|
||||
rabit.tracker_print(msg.format(metric_label, stopping_rounds))
|
||||
|
||||
state['maximize_score'] = maximize_score
|
||||
state['best_iteration'] = 0
|
||||
|
||||
@@ -118,13 +118,27 @@ def _load_lib():
|
||||
if len(lib_paths) == 0:
|
||||
return None
|
||||
pathBackup = os.environ['PATH']
|
||||
lib_success = False
|
||||
os_error_list = []
|
||||
for lib_path in lib_paths:
|
||||
try:
|
||||
# needed when the lib is linked with non-system-available dependencies
|
||||
os.environ['PATH'] = pathBackup + os.pathsep + os.path.dirname(lib_path)
|
||||
lib = ctypes.cdll.LoadLibrary(lib_path)
|
||||
except OSError:
|
||||
lib_success = True
|
||||
except OSError as e:
|
||||
os_error_list.append(str(e))
|
||||
continue
|
||||
if not lib_success:
|
||||
libname = os.path.basename(lib_paths[0])
|
||||
raise XGBoostError(
|
||||
'XGBoost Library ({}) could not be loaded.\n'.format(libname) +
|
||||
'Likely causes:\n' +
|
||||
' * OpenMP runtime is not installed ' +
|
||||
'(vcomp140.dll or libgomp-1.dll for Windows, ' +
|
||||
'libgomp.so for UNIX-like OSes)\n' +
|
||||
' * You are running 32-bit Python on a 64-bit OS\n' +
|
||||
'Error message(s): {}\n'.format(os_error_list))
|
||||
lib.XGBGetLastError.restype = ctypes.c_char_p
|
||||
lib.callback = _get_log_callback_func()
|
||||
if lib.XGBRegisterLogCallback(lib.callback) != 0:
|
||||
@@ -337,6 +351,11 @@ class DMatrix(object):
|
||||
# force into void_p, mac need to pass things in as void_p
|
||||
if data is None:
|
||||
self.handle = None
|
||||
|
||||
if feature_names is not None:
|
||||
self._feature_names = feature_names
|
||||
if feature_types is not None:
|
||||
self._feature_types = feature_types
|
||||
return
|
||||
|
||||
data, feature_names, feature_types = _maybe_pandas_data(data,
|
||||
@@ -479,7 +498,7 @@ class DMatrix(object):
|
||||
nthread))
|
||||
|
||||
def __del__(self):
|
||||
if self.handle is not None:
|
||||
if hasattr(self, "handle") and self.handle is not None:
|
||||
_check_call(_LIB.XGDMatrixFree(self.handle))
|
||||
self.handle = None
|
||||
|
||||
@@ -725,7 +744,8 @@ class DMatrix(object):
|
||||
res : DMatrix
|
||||
A new DMatrix containing only selected indices.
|
||||
"""
|
||||
res = DMatrix(None, feature_names=self.feature_names)
|
||||
res = DMatrix(None, feature_names=self.feature_names,
|
||||
feature_types=self.feature_types)
|
||||
res.handle = ctypes.c_void_p()
|
||||
_check_call(_LIB.XGDMatrixSliceDMatrix(self.handle,
|
||||
c_array(ctypes.c_int, rindex),
|
||||
@@ -830,7 +850,7 @@ class DMatrix(object):
|
||||
|
||||
|
||||
class Booster(object):
|
||||
"""A Booster of of XGBoost.
|
||||
"""A Booster of XGBoost.
|
||||
|
||||
Booster is the model of xgboost, that contains low level routines for
|
||||
training, prediction and evaluation.
|
||||
@@ -861,6 +881,10 @@ class Booster(object):
|
||||
ctypes.byref(self.handle)))
|
||||
self.set_param({'seed': 0})
|
||||
self.set_param(params or {})
|
||||
if (params is not None) and ('booster' in params):
|
||||
self.booster = params['booster']
|
||||
else:
|
||||
self.booster = 'gbtree'
|
||||
if model_file is not None:
|
||||
self.load_model(model_file)
|
||||
|
||||
@@ -1280,36 +1304,54 @@ class Booster(object):
|
||||
ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
|
||||
_check_call(_LIB.XGBoosterLoadModelFromBuffer(self.handle, ptr, length))
|
||||
|
||||
def dump_model(self, fout, fmap='', with_stats=False):
|
||||
def dump_model(self, fout, fmap='', with_stats=False, dump_format="text"):
|
||||
"""
|
||||
Dump model into a text file.
|
||||
Dump model into a text or JSON file.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
foout : string
|
||||
fout : string
|
||||
Output file name.
|
||||
fmap : string, optional
|
||||
Name of the file containing feature map names.
|
||||
with_stats : bool (optional)
|
||||
with_stats : bool, optional
|
||||
Controls whether the split statistics are output.
|
||||
dump_format : string, optional
|
||||
Format of model dump file. Can be 'text' or 'json'.
|
||||
"""
|
||||
if isinstance(fout, STRING_TYPES):
|
||||
fout = open(fout, 'w')
|
||||
need_close = True
|
||||
else:
|
||||
need_close = False
|
||||
ret = self.get_dump(fmap, with_stats)
|
||||
for i in range(len(ret)):
|
||||
fout.write('booster[{}]:\n'.format(i))
|
||||
fout.write(ret[i])
|
||||
ret = self.get_dump(fmap, with_stats, dump_format)
|
||||
if dump_format == 'json':
|
||||
fout.write('[\n')
|
||||
for i in range(len(ret)):
|
||||
fout.write(ret[i])
|
||||
if i < len(ret) - 1:
|
||||
fout.write(",\n")
|
||||
fout.write('\n]')
|
||||
else:
|
||||
for i in range(len(ret)):
|
||||
fout.write('booster[{}]:\n'.format(i))
|
||||
fout.write(ret[i])
|
||||
if need_close:
|
||||
fout.close()
|
||||
|
||||
def get_dump(self, fmap='', with_stats=False, dump_format="text"):
|
||||
"""
|
||||
Returns the dump the model as a list of strings.
|
||||
"""
|
||||
Returns the model dump as a list of strings.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fmap : string, optional
|
||||
Name of the file containing feature map names.
|
||||
with_stats : bool, optional
|
||||
Controls whether the split statistics are output.
|
||||
dump_format : string, optional
|
||||
Format of model dump. Can be 'text' or 'json'.
|
||||
"""
|
||||
length = c_bst_ulong()
|
||||
sarr = ctypes.POINTER(ctypes.c_char_p)()
|
||||
if self.feature_names is not None and fmap == '':
|
||||
@@ -1347,6 +1389,17 @@ class Booster(object):
|
||||
def get_fscore(self, fmap=''):
|
||||
"""Get feature importance of each feature.
|
||||
|
||||
.. note:: Feature importance is defined only for tree boosters
|
||||
|
||||
Feature importance is only defined when the decision tree model is chosen as base
|
||||
learner (`booster=gbtree`). It is not defined for other base learner types, such
|
||||
as linear learners (`booster=gblinear`).
|
||||
|
||||
.. note:: Zero-importance features will not be included
|
||||
|
||||
Keep in mind that this function does not include zero-importance feature, i.e.
|
||||
those features that have not been used in any split conditions.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fmap: str (optional)
|
||||
@@ -1365,6 +1418,12 @@ class Booster(object):
|
||||
* 'total_gain': the total gain across all splits the feature is used in.
|
||||
* 'total_cover': the total coverage across all splits the feature is used in.
|
||||
|
||||
.. note:: Feature importance is defined only for tree boosters
|
||||
|
||||
Feature importance is only defined when the decision tree model is chosen as base
|
||||
learner (`booster=gbtree`). It is not defined for other base learner types, such
|
||||
as linear learners (`booster=gblinear`).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fmap: str (optional)
|
||||
@@ -1373,6 +1432,10 @@ class Booster(object):
|
||||
One of the importance types defined above.
|
||||
"""
|
||||
|
||||
if self.booster != 'gbtree':
|
||||
raise ValueError('Feature importance is not defined for Booster type {}'
|
||||
.format(self.booster))
|
||||
|
||||
allowed_importance_types = ['weight', 'gain', 'cover', 'total_gain', 'total_cover']
|
||||
if importance_type not in allowed_importance_types:
|
||||
msg = ("importance_type mismatch, got '{}', expected one of " +
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
# coding: utf-8
|
||||
# pylint: disable=too-many-arguments, too-many-locals, invalid-name, fixme, E0012, R0912
|
||||
# pylint: disable=too-many-arguments, too-many-locals, invalid-name, fixme, E0012, R0912, C0302
|
||||
"""Scikit-Learn Wrapper interface for XGBoost."""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import numpy as np
|
||||
import warnings
|
||||
import json
|
||||
from .core import Booster, DMatrix, XGBoostError
|
||||
from .training import train
|
||||
|
||||
@@ -69,9 +70,9 @@ class XGBModel(XGBModelBase):
|
||||
booster: string
|
||||
Specify which booster to use: gbtree, gblinear or dart.
|
||||
nthread : int
|
||||
Number of parallel threads used to run xgboost. (Deprecated, please use n_jobs)
|
||||
Number of parallel threads used to run xgboost. (Deprecated, please use ``n_jobs``)
|
||||
n_jobs : int
|
||||
Number of parallel threads used to run xgboost. (replaces nthread)
|
||||
Number of parallel threads used to run xgboost. (replaces ``nthread``)
|
||||
gamma : float
|
||||
Minimum loss reduction required to make a further partition on a leaf node of the tree.
|
||||
min_child_weight : int
|
||||
@@ -181,6 +182,27 @@ class XGBModel(XGBModelBase):
|
||||
raise XGBoostError('need to call fit or load_model beforehand')
|
||||
return self._Booster
|
||||
|
||||
def set_params(self, **params):
|
||||
"""Set the parameters of this estimator.
|
||||
Modification of the sklearn method to allow unknown kwargs. This allows using
|
||||
the full range of xgboost parameters that are not defined as member variables
|
||||
in sklearn grid search.
|
||||
Returns
|
||||
-------
|
||||
self
|
||||
"""
|
||||
if not params:
|
||||
# Simple optimization to gain speed (inspect is slow)
|
||||
return self
|
||||
|
||||
for key, value in params.items():
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, value)
|
||||
else:
|
||||
self.kwargs[key] = value
|
||||
|
||||
return self
|
||||
|
||||
def get_params(self, deep=False):
|
||||
"""Get parameters."""
|
||||
params = super(XGBModel, self).get_params(deep=deep)
|
||||
@@ -220,6 +242,13 @@ class XGBModel(XGBModelBase):
|
||||
"""
|
||||
Save the model to a file.
|
||||
|
||||
The model is saved in an XGBoost internal binary format which is
|
||||
universal among the various XGBoost interfaces. Auxiliary attributes of
|
||||
the Python Booster object (such as feature names) will not be loaded.
|
||||
Label encodings (text labels to numeric labels) will be also lost.
|
||||
**If you are using only the Python interface, we recommend pickling the
|
||||
model object for best results.**
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fname : string
|
||||
@@ -231,6 +260,13 @@ class XGBModel(XGBModelBase):
|
||||
"""
|
||||
Load the model from a file.
|
||||
|
||||
The model is loaded from an XGBoost internal binary format which is
|
||||
universal among the various XGBoost interfaces. Auxiliary attributes of
|
||||
the Python Booster object (such as feature names) will not be loaded.
|
||||
Label encodings (text labels to numeric labels) will be also lost.
|
||||
**If you are using only the Python interface, we recommend pickling the
|
||||
model object for best results.**
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fname : string or a memory buffer
|
||||
@@ -242,7 +278,7 @@ class XGBModel(XGBModelBase):
|
||||
|
||||
def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,
|
||||
early_stopping_rounds=None, verbose=True, xgb_model=None,
|
||||
sample_weight_eval_set=None):
|
||||
sample_weight_eval_set=None, callbacks=None):
|
||||
# pylint: disable=missing-docstring,invalid-name,attribute-defined-outside-init
|
||||
"""
|
||||
Fit the gradient boosting model
|
||||
@@ -285,6 +321,14 @@ class XGBModel(XGBModelBase):
|
||||
xgb_model : str
|
||||
file name of stored xgb model or 'Booster' instance Xgb model to be
|
||||
loaded before training (allows training continuation).
|
||||
callbacks : list of callback functions
|
||||
List of callback functions that are applied at end of each iteration.
|
||||
It is possible to use predefined callbacks by using :ref:`callback_api`.
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[xgb.callback.reset_learning_rate(custom_rates)]
|
||||
"""
|
||||
if sample_weight is not None:
|
||||
trainDmatrix = DMatrix(X, label=y, weight=sample_weight,
|
||||
@@ -325,7 +369,8 @@ class XGBModel(XGBModelBase):
|
||||
self.n_estimators, evals=evals,
|
||||
early_stopping_rounds=early_stopping_rounds,
|
||||
evals_result=evals_result, obj=obj, feval=feval,
|
||||
verbose_eval=verbose, xgb_model=xgb_model)
|
||||
verbose_eval=verbose, xgb_model=xgb_model,
|
||||
callbacks=callbacks)
|
||||
|
||||
if evals_result:
|
||||
for val in evals_result.items():
|
||||
@@ -339,7 +384,7 @@ class XGBModel(XGBModelBase):
|
||||
self.best_ntree_limit = self._Booster.best_ntree_limit
|
||||
return self
|
||||
|
||||
def predict(self, data, output_margin=False, ntree_limit=None):
|
||||
def predict(self, data, output_margin=False, ntree_limit=None, validate_features=True):
|
||||
"""
|
||||
Predict with `data`.
|
||||
|
||||
@@ -369,6 +414,9 @@ class XGBModel(XGBModelBase):
|
||||
ntree_limit : int
|
||||
Limit number of trees in the prediction; defaults to best_ntree_limit if defined
|
||||
(i.e. it has been trained with early stopping), otherwise 0 (use all trees).
|
||||
validate_features : bool
|
||||
When this is True, validate that the Booster's and data's feature_names are identical.
|
||||
Otherwise, it is assumed that the feature_names are the same.
|
||||
Returns
|
||||
-------
|
||||
prediction : numpy array
|
||||
@@ -381,7 +429,8 @@ class XGBModel(XGBModelBase):
|
||||
ntree_limit = getattr(self, "best_ntree_limit", 0)
|
||||
return self.get_booster().predict(test_dmatrix,
|
||||
output_margin=output_margin,
|
||||
ntree_limit=ntree_limit)
|
||||
ntree_limit=ntree_limit,
|
||||
validate_features=validate_features)
|
||||
|
||||
def apply(self, X, ntree_limit=0):
|
||||
"""Return the predicted leaf every tree for each sample.
|
||||
@@ -409,10 +458,10 @@ class XGBModel(XGBModelBase):
|
||||
def evals_result(self):
|
||||
"""Return the evaluation results.
|
||||
|
||||
If ``eval_set`` is passed to the `fit` function, you can call ``evals_result()`` to
|
||||
get evaluation results for all passed eval_sets. When ``eval_metric`` is also
|
||||
passed to the ``fit`` function, the ``evals_result`` will contain the ``eval_metrics``
|
||||
passed to the ``fit`` function
|
||||
If **eval_set** is passed to the `fit` function, you can call
|
||||
``evals_result()`` to get evaluation results for all passed **eval_sets**.
|
||||
When **eval_metric** is also passed to the `fit` function, the
|
||||
**evals_result** will contain the **eval_metrics** passed to the `fit` function.
|
||||
|
||||
Returns
|
||||
-------
|
||||
@@ -434,9 +483,9 @@ class XGBModel(XGBModelBase):
|
||||
|
||||
evals_result = clf.evals_result()
|
||||
|
||||
The variable evals_result will contain:
|
||||
The variable **evals_result** will contain:
|
||||
|
||||
.. code-block:: none
|
||||
.. code-block:: python
|
||||
|
||||
{'validation_0': {'logloss': ['0.604835', '0.531479']},
|
||||
'validation_1': {'logloss': ['0.41965', '0.17686']}}
|
||||
@@ -453,17 +502,68 @@ class XGBModel(XGBModelBase):
|
||||
"""
|
||||
Feature importances property
|
||||
|
||||
.. note:: Feature importance is defined only for tree boosters
|
||||
|
||||
Feature importance is only defined when the decision tree model is chosen as base
|
||||
learner (`booster=gbtree`). It is not defined for other base learner types, such
|
||||
as linear learners (`booster=gblinear`).
|
||||
|
||||
Returns
|
||||
-------
|
||||
feature_importances_ : array of shape ``[n_features]``
|
||||
|
||||
"""
|
||||
if self.booster != 'gbtree':
|
||||
raise AttributeError('Feature importance is not defined for Booster type {}'
|
||||
.format(self.booster))
|
||||
b = self.get_booster()
|
||||
fs = b.get_fscore()
|
||||
all_features = [fs.get(f, 0.) for f in b.feature_names]
|
||||
all_features = np.array(all_features, dtype=np.float32)
|
||||
return all_features / all_features.sum()
|
||||
|
||||
@property
|
||||
def coef_(self):
|
||||
"""
|
||||
Coefficients property
|
||||
|
||||
.. note:: Coefficients are defined only for linear learners
|
||||
|
||||
Coefficients are only defined when the linear model is chosen as base
|
||||
learner (`booster=gblinear`). It is not defined for other base learner types, such
|
||||
as tree learners (`booster=gbtree`).
|
||||
|
||||
Returns
|
||||
-------
|
||||
coef_ : array of shape ``[n_features]``
|
||||
"""
|
||||
if self.booster != 'gblinear':
|
||||
raise AttributeError('Coefficients are not defined for Booster type {}'
|
||||
.format(self.booster))
|
||||
b = self.get_booster()
|
||||
return json.loads(b.get_dump(dump_format='json')[0])['weight']
|
||||
|
||||
@property
|
||||
def intercept_(self):
|
||||
"""
|
||||
Intercept (bias) property
|
||||
|
||||
.. note:: Intercept is defined only for linear learners
|
||||
|
||||
Intercept (bias) is only defined when the linear model is chosen as base
|
||||
learner (`booster=gblinear`). It is not defined for other base learner types, such
|
||||
as tree learners (`booster=gbtree`).
|
||||
|
||||
Returns
|
||||
-------
|
||||
intercept_ : array of shape ``[n_features]``
|
||||
"""
|
||||
if self.booster != 'gblinear':
|
||||
raise AttributeError('Intercept (bias) is not defined for Booster type {}'
|
||||
.format(self.booster))
|
||||
b = self.get_booster()
|
||||
return json.loads(b.get_dump(dump_format='json')[0])['bias']
|
||||
|
||||
|
||||
class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
# pylint: disable=missing-docstring,too-many-arguments,invalid-name
|
||||
@@ -488,7 +588,7 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
|
||||
def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,
|
||||
early_stopping_rounds=None, verbose=True, xgb_model=None,
|
||||
sample_weight_eval_set=None):
|
||||
sample_weight_eval_set=None, callbacks=None):
|
||||
# pylint: disable = attribute-defined-outside-init,arguments-differ
|
||||
"""
|
||||
Fit gradient boosting classifier
|
||||
@@ -531,6 +631,14 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
xgb_model : str
|
||||
file name of stored xgb model or 'Booster' instance Xgb model to be
|
||||
loaded before training (allows training continuation).
|
||||
callbacks : list of callback functions
|
||||
List of callback functions that are applied at end of each iteration.
|
||||
It is possible to use predefined callbacks by using :ref:`callback_api`.
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[xgb.callback.reset_learning_rate(custom_rates)]
|
||||
"""
|
||||
evals_result = {}
|
||||
self.classes_ = np.unique(y)
|
||||
@@ -588,7 +696,8 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
evals=evals,
|
||||
early_stopping_rounds=early_stopping_rounds,
|
||||
evals_result=evals_result, obj=obj, feval=feval,
|
||||
verbose_eval=verbose, xgb_model=None)
|
||||
verbose_eval=verbose, xgb_model=None,
|
||||
callbacks=callbacks)
|
||||
|
||||
self.objective = xgb_options["objective"]
|
||||
if evals_result:
|
||||
@@ -604,7 +713,7 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
|
||||
return self
|
||||
|
||||
def predict(self, data, output_margin=False, ntree_limit=None):
|
||||
def predict(self, data, output_margin=False, ntree_limit=None, validate_features=True):
|
||||
"""
|
||||
Predict with `data`.
|
||||
|
||||
@@ -634,6 +743,9 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
ntree_limit : int
|
||||
Limit number of trees in the prediction; defaults to best_ntree_limit if defined
|
||||
(i.e. it has been trained with early stopping), otherwise 0 (use all trees).
|
||||
validate_features : bool
|
||||
When this is True, validate that the Booster's and data's feature_names are identical.
|
||||
Otherwise, it is assumed that the feature_names are the same.
|
||||
Returns
|
||||
-------
|
||||
prediction : numpy array
|
||||
@@ -643,7 +755,12 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
ntree_limit = getattr(self, "best_ntree_limit", 0)
|
||||
class_probs = self.get_booster().predict(test_dmatrix,
|
||||
output_margin=output_margin,
|
||||
ntree_limit=ntree_limit)
|
||||
ntree_limit=ntree_limit,
|
||||
validate_features=validate_features)
|
||||
if output_margin:
|
||||
# If output_margin is active, simply return the scores
|
||||
return class_probs
|
||||
|
||||
if len(class_probs.shape) > 1:
|
||||
column_indexes = np.argmax(class_probs, axis=1)
|
||||
else:
|
||||
@@ -651,7 +768,7 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
column_indexes[class_probs > 0.5] = 1
|
||||
return self._le.inverse_transform(column_indexes)
|
||||
|
||||
def predict_proba(self, data, ntree_limit=None):
|
||||
def predict_proba(self, data, ntree_limit=None, validate_features=True):
|
||||
"""
|
||||
Predict the probability of each `data` example being of a given class.
|
||||
|
||||
@@ -668,6 +785,9 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
ntree_limit : int
|
||||
Limit number of trees in the prediction; defaults to best_ntree_limit if defined
|
||||
(i.e. it has been trained with early stopping), otherwise 0 (use all trees).
|
||||
validate_features : bool
|
||||
When this is True, validate that the Booster's and data's feature_names are identical.
|
||||
Otherwise, it is assumed that the feature_names are the same.
|
||||
|
||||
Returns
|
||||
-------
|
||||
@@ -678,7 +798,8 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
if ntree_limit is None:
|
||||
ntree_limit = getattr(self, "best_ntree_limit", 0)
|
||||
class_probs = self.get_booster().predict(test_dmatrix,
|
||||
ntree_limit=ntree_limit)
|
||||
ntree_limit=ntree_limit,
|
||||
validate_features=validate_features)
|
||||
if self.objective == "multi:softprob":
|
||||
return class_probs
|
||||
else:
|
||||
@@ -689,10 +810,10 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
def evals_result(self):
|
||||
"""Return the evaluation results.
|
||||
|
||||
If eval_set is passed to the `fit` function, you can call evals_result() to
|
||||
get evaluation results for all passed eval_sets. When eval_metric is also
|
||||
passed to the `fit` function, the evals_result will contain the eval_metrics
|
||||
passed to the `fit` function
|
||||
If **eval_set** is passed to the `fit` function, you can call
|
||||
``evals_result()`` to get evaluation results for all passed **eval_sets**.
|
||||
When **eval_metric** is also passed to the `fit` function, the
|
||||
**evals_result** will contain the **eval_metrics** passed to the `fit` function.
|
||||
|
||||
Returns
|
||||
-------
|
||||
@@ -714,9 +835,9 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
|
||||
|
||||
evals_result = clf.evals_result()
|
||||
|
||||
The variable ``evals_result`` will contain
|
||||
The variable **evals_result** will contain
|
||||
|
||||
.. code-block:: none
|
||||
.. code-block:: python
|
||||
|
||||
{'validation_0': {'logloss': ['0.604835', '0.531479']},
|
||||
'validation_1': {'logloss': ['0.41965', '0.17686']}}
|
||||
@@ -733,3 +854,259 @@ class XGBRegressor(XGBModel, XGBRegressorBase):
|
||||
# pylint: disable=missing-docstring
|
||||
__doc__ = "Implementation of the scikit-learn API for XGBoost regression.\n\n"\
|
||||
+ '\n'.join(XGBModel.__doc__.split('\n')[2:])
|
||||
|
||||
|
||||
class XGBRanker(XGBModel):
|
||||
# pylint: disable=missing-docstring,too-many-arguments,invalid-name
|
||||
"""Implementation of the Scikit-Learn API for XGBoost Ranking.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
max_depth : int
|
||||
Maximum tree depth for base learners.
|
||||
learning_rate : float
|
||||
Boosting learning rate (xgb's "eta")
|
||||
n_estimators : int
|
||||
Number of boosted trees to fit.
|
||||
silent : boolean
|
||||
Whether to print messages while running boosting.
|
||||
objective : string
|
||||
Specify the learning task and the corresponding learning objective.
|
||||
Only "rank:pairwise" is supported currently.
|
||||
booster: string
|
||||
Specify which booster to use: gbtree, gblinear or dart.
|
||||
nthread : int
|
||||
Number of parallel threads used to run xgboost. (Deprecated, please use ``n_jobs``)
|
||||
n_jobs : int
|
||||
Number of parallel threads used to run xgboost. (replaces ``nthread``)
|
||||
gamma : float
|
||||
Minimum loss reduction required to make a further partition on a leaf node of the tree.
|
||||
min_child_weight : int
|
||||
Minimum sum of instance weight(hessian) needed in a child.
|
||||
max_delta_step : int
|
||||
Maximum delta step we allow each tree's weight estimation to be.
|
||||
subsample : float
|
||||
Subsample ratio of the training instance.
|
||||
colsample_bytree : float
|
||||
Subsample ratio of columns when constructing each tree.
|
||||
colsample_bylevel : float
|
||||
Subsample ratio of columns for each split, in each level.
|
||||
reg_alpha : float (xgb's alpha)
|
||||
L1 regularization term on weights
|
||||
reg_lambda : float (xgb's lambda)
|
||||
L2 regularization term on weights
|
||||
scale_pos_weight : float
|
||||
Balancing of positive and negative weights.
|
||||
base_score:
|
||||
The initial prediction score of all instances, global bias.
|
||||
seed : int
|
||||
Random number seed. (Deprecated, please use random_state)
|
||||
random_state : int
|
||||
Random number seed. (replaces seed)
|
||||
missing : float, optional
|
||||
Value in the data which needs to be present as a missing value. If
|
||||
None, defaults to np.nan.
|
||||
\*\*kwargs : dict, optional
|
||||
Keyword arguments for XGBoost Booster object. Full documentation of parameters can
|
||||
be found here: https://github.com/dmlc/xgboost/blob/master/doc/parameter.rst.
|
||||
Attempting to set a parameter via the constructor args and \*\*kwargs dict
|
||||
simultaneously will result in a TypeError.
|
||||
|
||||
.. note:: \*\*kwargs unsupported by scikit-learn
|
||||
|
||||
\*\*kwargs is unsupported by scikit-learn. We do not guarantee that parameters
|
||||
passed via this argument will interact properly with scikit-learn.
|
||||
|
||||
Note
|
||||
----
|
||||
A custom objective function is currently not supported by XGBRanker.
|
||||
|
||||
Note
|
||||
----
|
||||
Group information is required for ranking tasks.
|
||||
|
||||
Before fitting the model, your data need to be sorted by group. When
|
||||
fitting the model, you need to provide an additional array that
|
||||
contains the size of each group.
|
||||
|
||||
For example, if your original data look like:
|
||||
|
||||
+-------+-----------+---------------+
|
||||
| qid | label | features |
|
||||
+-------+-----------+---------------+
|
||||
| 1 | 0 | x_1 |
|
||||
+-------+-----------+---------------+
|
||||
| 1 | 1 | x_2 |
|
||||
+-------+-----------+---------------+
|
||||
| 1 | 0 | x_3 |
|
||||
+-------+-----------+---------------+
|
||||
| 2 | 0 | x_4 |
|
||||
+-------+-----------+---------------+
|
||||
| 2 | 1 | x_5 |
|
||||
+-------+-----------+---------------+
|
||||
| 2 | 1 | x_6 |
|
||||
+-------+-----------+---------------+
|
||||
| 2 | 1 | x_7 |
|
||||
+-------+-----------+---------------+
|
||||
|
||||
then your group array should be ``[3, 4]``.
|
||||
"""
|
||||
|
||||
def __init__(self, max_depth=3, learning_rate=0.1, n_estimators=100,
|
||||
silent=True, objective="rank:pairwise", booster='gbtree',
|
||||
n_jobs=-1, nthread=None, gamma=0, min_child_weight=1, max_delta_step=0,
|
||||
subsample=1, colsample_bytree=1, colsample_bylevel=1,
|
||||
reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
|
||||
base_score=0.5, random_state=0, seed=None, missing=None, **kwargs):
|
||||
|
||||
super(XGBRanker, self).__init__(max_depth, learning_rate,
|
||||
n_estimators, silent, objective, booster,
|
||||
n_jobs, nthread, gamma, min_child_weight, max_delta_step,
|
||||
subsample, colsample_bytree, colsample_bylevel,
|
||||
reg_alpha, reg_lambda, scale_pos_weight,
|
||||
base_score, random_state, seed, missing)
|
||||
if callable(self.objective):
|
||||
raise ValueError("custom objective function not supported by XGBRanker")
|
||||
elif "rank:" not in self.objective:
|
||||
raise ValueError("please use XGBRanker for ranking task")
|
||||
|
||||
def fit(self, X, y, group, sample_weight=None, eval_set=None, sample_weight_eval_set=None,
|
||||
eval_group=None, eval_metric=None, early_stopping_rounds=None,
|
||||
verbose=False, xgb_model=None, callbacks=None):
|
||||
# pylint: disable = attribute-defined-outside-init,arguments-differ
|
||||
"""
|
||||
Fit the gradient boosting model
|
||||
|
||||
Parameters
|
||||
----------
|
||||
X : array_like
|
||||
Feature matrix
|
||||
y : array_like
|
||||
Labels
|
||||
group : array_like
|
||||
group size of training data
|
||||
sample_weight : array_like
|
||||
instance weights
|
||||
eval_set : list, optional
|
||||
A list of (X, y) tuple pairs to use as a validation set for
|
||||
early-stopping
|
||||
sample_weight_eval_set : list, optional
|
||||
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of
|
||||
instance weights on the i-th validation set.
|
||||
eval_group : list of arrays, optional
|
||||
A list that contains the group size corresponds to each
|
||||
(X, y) pair in eval_set
|
||||
eval_metric : str, callable, optional
|
||||
If a str, should be a built-in evaluation metric to use. See
|
||||
doc/parameter.rst. If callable, a custom evaluation metric. The call
|
||||
signature is func(y_predicted, y_true) where y_true will be a
|
||||
DMatrix object such that you may need to call the get_label
|
||||
method. It must return a str, value pair where the str is a name
|
||||
for the evaluation and value is the value of the evaluation
|
||||
function. This objective is always minimized.
|
||||
early_stopping_rounds : int
|
||||
Activates early stopping. Validation error needs to decrease at
|
||||
least every <early_stopping_rounds> round(s) to continue training.
|
||||
Requires at least one item in evals. If there's more than one,
|
||||
will use the last. Returns the model from the last iteration
|
||||
(not the best one). If early stopping occurs, the model will
|
||||
have three additional fields: bst.best_score, bst.best_iteration
|
||||
and bst.best_ntree_limit.
|
||||
(Use bst.best_ntree_limit to get the correct value if num_parallel_tree
|
||||
and/or num_class appears in the parameters)
|
||||
verbose : bool
|
||||
If `verbose` and an evaluation set is used, writes the evaluation
|
||||
metric measured on the validation set to stderr.
|
||||
xgb_model : str
|
||||
file name of stored xgb model or 'Booster' instance Xgb model to be
|
||||
loaded before training (allows training continuation).
|
||||
callbacks : list of callback functions
|
||||
List of callback functions that are applied at end of each iteration.
|
||||
It is possible to use predefined callbacks by using :ref:`callback_api`.
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[xgb.callback.reset_learning_rate(custom_rates)]
|
||||
"""
|
||||
# check if group information is provided
|
||||
if group is None:
|
||||
raise ValueError("group is required for ranking task")
|
||||
|
||||
if eval_set is not None:
|
||||
if eval_group is None:
|
||||
raise ValueError("eval_group is required if eval_set is not None")
|
||||
elif len(eval_group) != len(eval_set):
|
||||
raise ValueError("length of eval_group should match that of eval_set")
|
||||
elif any(group is None for group in eval_group):
|
||||
raise ValueError("group is required for all eval datasets for ranking task")
|
||||
|
||||
def _dmat_init(group, **params):
|
||||
ret = DMatrix(**params)
|
||||
ret.set_group(group)
|
||||
return ret
|
||||
|
||||
if sample_weight is not None:
|
||||
train_dmatrix = _dmat_init(group, data=X, label=y, weight=sample_weight,
|
||||
missing=self.missing, nthread=self.n_jobs)
|
||||
else:
|
||||
train_dmatrix = _dmat_init(group, data=X, label=y,
|
||||
missing=self.missing, nthread=self.n_jobs)
|
||||
|
||||
evals_result = {}
|
||||
|
||||
if eval_set is not None:
|
||||
if sample_weight_eval_set is None:
|
||||
sample_weight_eval_set = [None] * len(eval_set)
|
||||
evals = [_dmat_init(eval_group[i], data=eval_set[i][0], label=eval_set[i][1],
|
||||
missing=self.missing, weight=sample_weight_eval_set[i],
|
||||
nthread=self.n_jobs) for i in range(len(eval_set))]
|
||||
nevals = len(evals)
|
||||
eval_names = ["eval_{}".format(i) for i in range(nevals)]
|
||||
evals = list(zip(evals, eval_names))
|
||||
else:
|
||||
evals = ()
|
||||
|
||||
params = self.get_xgb_params()
|
||||
|
||||
feval = eval_metric if callable(eval_metric) else None
|
||||
if eval_metric is not None:
|
||||
if callable(eval_metric):
|
||||
eval_metric = None
|
||||
else:
|
||||
params.update({'eval_metric': eval_metric})
|
||||
|
||||
self._Booster = train(params, train_dmatrix,
|
||||
self.n_estimators,
|
||||
early_stopping_rounds=early_stopping_rounds, evals=evals,
|
||||
evals_result=evals_result, feval=feval,
|
||||
verbose_eval=verbose, xgb_model=xgb_model,
|
||||
callbacks=callbacks)
|
||||
|
||||
self.objective = params["objective"]
|
||||
|
||||
if evals_result:
|
||||
for val in evals_result.items():
|
||||
evals_result_key = list(val[1].keys())[0]
|
||||
evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
|
||||
self.evals_result = evals_result
|
||||
|
||||
if early_stopping_rounds is not None:
|
||||
self.best_score = self._Booster.best_score
|
||||
self.best_iteration = self._Booster.best_iteration
|
||||
self.best_ntree_limit = self._Booster.best_ntree_limit
|
||||
|
||||
return self
|
||||
|
||||
def predict(self, data, output_margin=False, ntree_limit=0, validate_features=True):
|
||||
|
||||
test_dmatrix = DMatrix(data, missing=self.missing)
|
||||
if ntree_limit is None:
|
||||
ntree_limit = getattr(self, "best_ntree_limit", 0)
|
||||
|
||||
return self.get_booster().predict(test_dmatrix,
|
||||
output_margin=output_margin,
|
||||
ntree_limit=ntree_limit,
|
||||
validate_features=validate_features)
|
||||
|
||||
predict.__doc__ = XGBModel.predict.__doc__
|
||||
|
||||
@@ -137,34 +137,35 @@ def train(params, dtrain, num_boost_round=10, evals=(), obj=None, feval=None,
|
||||
Whether to maximize feval.
|
||||
early_stopping_rounds: int
|
||||
Activates early stopping. Validation error needs to decrease at least
|
||||
every <early_stopping_rounds> round(s) to continue training.
|
||||
Requires at least one item in evals.
|
||||
every **early_stopping_rounds** round(s) to continue training.
|
||||
Requires at least one item in **evals**.
|
||||
If there's more than one, will use the last.
|
||||
Returns the model from the last iteration (not the best one).
|
||||
If early stopping occurs, the model will have three additional fields:
|
||||
bst.best_score, bst.best_iteration and bst.best_ntree_limit.
|
||||
(Use bst.best_ntree_limit to get the correct value if num_parallel_tree
|
||||
and/or num_class appears in the parameters)
|
||||
``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``.
|
||||
(Use ``bst.best_ntree_limit`` to get the correct value if
|
||||
``num_parallel_tree`` and/or ``num_class`` appears in the parameters)
|
||||
evals_result: dict
|
||||
This dictionary stores the evaluation results of all the items in watchlist.
|
||||
|
||||
Example: with a watchlist containing [(dtest,'eval'), (dtrain,'train')] and
|
||||
a parameter containing ('eval_metric': 'logloss'), the **evals_result**
|
||||
returns
|
||||
Example: with a watchlist containing
|
||||
``[(dtest,'eval'), (dtrain,'train')]`` and
|
||||
a parameter containing ``('eval_metric': 'logloss')``,
|
||||
the **evals_result** returns
|
||||
|
||||
.. code-block:: none
|
||||
.. code-block:: python
|
||||
|
||||
{'train': {'logloss': ['0.48253', '0.35953']},
|
||||
'eval': {'logloss': ['0.480385', '0.357756']}}
|
||||
|
||||
verbose_eval : bool or int
|
||||
Requires at least one item in evals.
|
||||
Requires at least one item in **evals**.
|
||||
If **verbose_eval** is True then the evaluation metric on the validation set is
|
||||
printed at each boosting stage.
|
||||
If **verbose_eval** is an integer then the evaluation metric on the validation set
|
||||
is printed at every given **verbose_eval** boosting stage. The last boosting stage
|
||||
/ the boosting stage found by using **early_stopping_rounds** is also printed.
|
||||
Example: with ``verbose_eval=4`` and at least one item in evals, an evaluation metric
|
||||
Example: with ``verbose_eval=4`` and at least one item in **evals**, an evaluation metric
|
||||
is printed every 4 boosting stages, instead of every boosting stage.
|
||||
learning_rates: list or function (deprecated - use callback API instead)
|
||||
List of learning rate for each boosting round
|
||||
@@ -175,12 +176,17 @@ def train(params, dtrain, num_boost_round=10, evals=(), obj=None, feval=None,
|
||||
Xgb model to be loaded before training (allows training continuation).
|
||||
callbacks : list of callback functions
|
||||
List of callback functions that are applied at end of each iteration.
|
||||
It is possible to use predefined callbacks by using xgb.callback module.
|
||||
Example: [xgb.callback.reset_learning_rate(custom_rates)]
|
||||
It is possible to use predefined callbacks by using
|
||||
:ref:`Callback API <callback_api>`.
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[xgb.callback.reset_learning_rate(custom_rates)]
|
||||
|
||||
Returns
|
||||
-------
|
||||
booster : a trained booster model
|
||||
Booster : a trained booster model
|
||||
"""
|
||||
callbacks = [] if callbacks is None else callbacks
|
||||
|
||||
@@ -334,7 +340,7 @@ def cv(params, dtrain, num_boost_round=10, nfold=3, stratified=False, folds=None
|
||||
folds : a KFold or StratifiedKFold instance or list of fold indices
|
||||
Sklearn KFolds or StratifiedKFolds object.
|
||||
Alternatively may explicitly pass sample indices for each fold.
|
||||
For ``n`` folds, ``folds`` should be a length ``n`` list of tuples.
|
||||
For ``n`` folds, **folds** should be a length ``n`` list of tuples.
|
||||
Each tuple is ``(in,out)`` where ``in`` is a list of indices to be used
|
||||
as the training samples for the ``n`` th fold and ``out`` is a list of
|
||||
indices to be used as the testing samples for the ``n`` th fold.
|
||||
@@ -368,10 +374,11 @@ def cv(params, dtrain, num_boost_round=10, nfold=3, stratified=False, folds=None
|
||||
Seed used to generate the folds (passed to numpy.random.seed).
|
||||
callbacks : list of callback functions
|
||||
List of callback functions that are applied at end of each iteration.
|
||||
It is possible to use predefined callbacks by using xgb.callback module.
|
||||
It is possible to use predefined callbacks by using
|
||||
:ref:`Callback API <callback_api>`.
|
||||
Example:
|
||||
|
||||
.. code-block:: none
|
||||
.. code-block:: python
|
||||
|
||||
[xgb.callback.reset_learning_rate(custom_rates)]
|
||||
shuffle : bool
|
||||
|
||||
2
rabit
2
rabit
Submodule rabit updated: 87143deb4c...eb2590b774
@@ -7,9 +7,10 @@
|
||||
#include <dmlc/thread_local.h>
|
||||
#include <rabit/rabit.h>
|
||||
#include <cstdio>
|
||||
#include <cstring>
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <cstring>
|
||||
#include <memory>
|
||||
|
||||
#include "./c_api_error.h"
|
||||
@@ -52,6 +53,7 @@ class Booster {
|
||||
|
||||
inline void LazyInit() {
|
||||
if (!configured_) {
|
||||
LoadSavedParamFromAttr();
|
||||
learner_->Configure(cfg_);
|
||||
configured_ = true;
|
||||
}
|
||||
@@ -61,6 +63,25 @@ class Booster {
|
||||
}
|
||||
}
|
||||
|
||||
inline void LoadSavedParamFromAttr() {
|
||||
// Locate saved parameters from learner attributes
|
||||
const std::string prefix = "SAVED_PARAM_";
|
||||
for (const std::string& attr_name : learner_->GetAttrNames()) {
|
||||
if (attr_name.find(prefix) == 0) {
|
||||
const std::string saved_param = attr_name.substr(prefix.length());
|
||||
if (std::none_of(cfg_.begin(), cfg_.end(),
|
||||
[&](const std::pair<std::string, std::string>& x)
|
||||
{ return x.first == saved_param; })) {
|
||||
// If cfg_ contains the parameter already, skip it
|
||||
// (this is to allow the user to explicitly override its value)
|
||||
std::string saved_param_value;
|
||||
CHECK(learner_->GetAttr(attr_name, &saved_param_value));
|
||||
cfg_.emplace_back(saved_param, saved_param_value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline void LoadModel(dmlc::Stream* fi) {
|
||||
learner_->Load(fi);
|
||||
initialized_ = true;
|
||||
@@ -250,20 +271,22 @@ XGB_DLL int XGDMatrixCreateFromCSREx(const size_t* indptr,
|
||||
|
||||
API_BEGIN();
|
||||
data::SimpleCSRSource& mat = *source;
|
||||
mat.page_.offset.reserve(nindptr);
|
||||
mat.page_.data.reserve(nelem);
|
||||
mat.page_.offset.resize(1);
|
||||
mat.page_.offset[0] = 0;
|
||||
auto& offset_vec = mat.page_.offset.HostVector();
|
||||
auto& data_vec = mat.page_.data.HostVector();
|
||||
offset_vec.reserve(nindptr);
|
||||
data_vec.reserve(nelem);
|
||||
offset_vec.resize(1);
|
||||
offset_vec[0] = 0;
|
||||
size_t num_column = 0;
|
||||
for (size_t i = 1; i < nindptr; ++i) {
|
||||
for (size_t j = indptr[i - 1]; j < indptr[i]; ++j) {
|
||||
if (!common::CheckNAN(data[j])) {
|
||||
// automatically skip nan.
|
||||
mat.page_.data.emplace_back(Entry(indices[j], data[j]));
|
||||
data_vec.emplace_back(Entry(indices[j], data[j]));
|
||||
num_column = std::max(num_column, static_cast<size_t>(indices[j] + 1));
|
||||
}
|
||||
}
|
||||
mat.page_.offset.push_back(mat.page_.data.size());
|
||||
offset_vec.push_back(mat.page_.data.Size());
|
||||
}
|
||||
|
||||
mat.info.num_col_ = num_column;
|
||||
@@ -273,7 +296,7 @@ XGB_DLL int XGDMatrixCreateFromCSREx(const size_t* indptr,
|
||||
mat.info.num_col_ = num_col;
|
||||
}
|
||||
mat.info.num_row_ = nindptr - 1;
|
||||
mat.info.num_nonzero_ = mat.page_.data.size();
|
||||
mat.info.num_nonzero_ = mat.page_.data.Size();
|
||||
*out = new std::shared_ptr<DMatrix>(DMatrix::Create(std::move(source)));
|
||||
API_END();
|
||||
}
|
||||
@@ -305,7 +328,9 @@ XGB_DLL int XGDMatrixCreateFromCSCEx(const size_t* col_ptr,
|
||||
// FIXME: User should be able to control number of threads
|
||||
const int nthread = omp_get_max_threads();
|
||||
data::SimpleCSRSource& mat = *source;
|
||||
common::ParallelGroupBuilder<Entry> builder(&mat.page_.offset, &mat.page_.data);
|
||||
auto& offset_vec = mat.page_.offset.HostVector();
|
||||
auto& data_vec = mat.page_.data.HostVector();
|
||||
common::ParallelGroupBuilder<Entry> builder(&offset_vec, &data_vec);
|
||||
builder.InitBudget(0, nthread);
|
||||
size_t ncol = nindptr - 1; // NOLINT(*)
|
||||
#pragma omp parallel for schedule(static)
|
||||
@@ -329,15 +354,16 @@ XGB_DLL int XGDMatrixCreateFromCSCEx(const size_t* col_ptr,
|
||||
}
|
||||
}
|
||||
}
|
||||
mat.info.num_row_ = mat.page_.offset.size() - 1;
|
||||
mat.info.num_row_ = mat.page_.offset.Size() - 1;
|
||||
if (num_row > 0) {
|
||||
CHECK_LE(mat.info.num_row_, num_row);
|
||||
// provision for empty rows at the bottom of matrix
|
||||
auto& offset_vec = mat.page_.offset.HostVector();
|
||||
for (uint64_t i = mat.info.num_row_; i < static_cast<uint64_t>(num_row); ++i) {
|
||||
mat.page_.offset.push_back(mat.page_.offset.back());
|
||||
offset_vec.push_back(offset_vec.back());
|
||||
}
|
||||
mat.info.num_row_ = num_row;
|
||||
CHECK_EQ(mat.info.num_row_, mat.page_.offset.size() - 1); // sanity check
|
||||
CHECK_EQ(mat.info.num_row_, offset_vec.size() - 1); // sanity check
|
||||
}
|
||||
mat.info.num_col_ = ncol;
|
||||
mat.info.num_nonzero_ = nelem;
|
||||
@@ -368,7 +394,9 @@ XGB_DLL int XGDMatrixCreateFromMat(const bst_float* data,
|
||||
|
||||
API_BEGIN();
|
||||
data::SimpleCSRSource& mat = *source;
|
||||
mat.page_.offset.resize(1+nrow);
|
||||
auto& offset_vec = mat.page_.offset.HostVector();
|
||||
auto& data_vec = mat.page_.data.HostVector();
|
||||
offset_vec.resize(1+nrow);
|
||||
bool nan_missing = common::CheckNAN(missing);
|
||||
mat.info.num_row_ = nrow;
|
||||
mat.info.num_col_ = ncol;
|
||||
@@ -388,9 +416,9 @@ XGB_DLL int XGDMatrixCreateFromMat(const bst_float* data,
|
||||
}
|
||||
}
|
||||
}
|
||||
mat.page_.offset[i+1] = mat.page_.offset[i] + nelem;
|
||||
offset_vec[i+1] = offset_vec[i] + nelem;
|
||||
}
|
||||
mat.page_.data.resize(mat.page_.data.size() + mat.page_.offset.back());
|
||||
data_vec.resize(mat.page_.data.Size() + offset_vec.back());
|
||||
|
||||
data = data0;
|
||||
for (xgboost::bst_ulong i = 0; i < nrow; ++i, data += ncol) {
|
||||
@@ -399,14 +427,14 @@ XGB_DLL int XGDMatrixCreateFromMat(const bst_float* data,
|
||||
if (common::CheckNAN(data[j])) {
|
||||
} else {
|
||||
if (nan_missing || data[j] != missing) {
|
||||
mat.page_.data[mat.page_.offset[i] + matj] = Entry(j, data[j]);
|
||||
data_vec[offset_vec[i] + matj] = Entry(j, data[j]);
|
||||
++matj;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mat.info.num_nonzero_ = mat.page_.data.size();
|
||||
mat.info.num_nonzero_ = mat.page_.data.Size();
|
||||
*out = new std::shared_ptr<DMatrix>(DMatrix::Create(std::move(source)));
|
||||
API_END();
|
||||
}
|
||||
@@ -461,7 +489,9 @@ XGB_DLL int XGDMatrixCreateFromMat_omp(const bst_float* data, // NOLINT
|
||||
|
||||
std::unique_ptr<data::SimpleCSRSource> source(new data::SimpleCSRSource());
|
||||
data::SimpleCSRSource& mat = *source;
|
||||
mat.page_.offset.resize(1+nrow);
|
||||
auto& offset_vec = mat.page_.offset.HostVector();
|
||||
auto& data_vec = mat.page_.data.HostVector();
|
||||
offset_vec.resize(1+nrow);
|
||||
mat.info.num_row_ = nrow;
|
||||
mat.info.num_col_ = ncol;
|
||||
|
||||
@@ -487,7 +517,7 @@ XGB_DLL int XGDMatrixCreateFromMat_omp(const bst_float* data, // NOLINT
|
||||
++nelem;
|
||||
}
|
||||
}
|
||||
mat.page_.offset[i+1] = nelem;
|
||||
offset_vec[i+1] = nelem;
|
||||
}
|
||||
}
|
||||
// Inform about any NaNs and resize data matrix
|
||||
@@ -496,8 +526,8 @@ XGB_DLL int XGDMatrixCreateFromMat_omp(const bst_float* data, // NOLINT
|
||||
}
|
||||
|
||||
// do cumulative sum (to avoid otherwise need to copy)
|
||||
PrefixSum(&mat.page_.offset[0], mat.page_.offset.size());
|
||||
mat.page_.data.resize(mat.page_.data.size() + mat.page_.offset.back());
|
||||
PrefixSum(&offset_vec[0], offset_vec.size());
|
||||
data_vec.resize(mat.page_.data.Size() + offset_vec.back());
|
||||
|
||||
// Fill data matrix (now that know size, no need for slow push_back())
|
||||
#pragma omp parallel num_threads(nthread)
|
||||
@@ -508,7 +538,7 @@ XGB_DLL int XGDMatrixCreateFromMat_omp(const bst_float* data, // NOLINT
|
||||
for (xgboost::bst_ulong j = 0; j < ncol; ++j) {
|
||||
if (common::CheckNAN(data[ncol * i + j])) {
|
||||
} else if (nan_missing || data[ncol * i + j] != missing) {
|
||||
mat.page_.data[mat.page_.offset[i] + matj] =
|
||||
data_vec[offset_vec[i] + matj] =
|
||||
Entry(j, data[ncol * i + j]);
|
||||
++matj;
|
||||
}
|
||||
@@ -518,7 +548,7 @@ XGB_DLL int XGDMatrixCreateFromMat_omp(const bst_float* data, // NOLINT
|
||||
// restore omp state
|
||||
omp_set_num_threads(nthread_orig);
|
||||
|
||||
mat.info.num_nonzero_ = mat.page_.data.size();
|
||||
mat.info.num_nonzero_ = mat.page_.data.Size();
|
||||
*out = new std::shared_ptr<DMatrix>(DMatrix::Create(std::move(source)));
|
||||
API_END();
|
||||
}
|
||||
@@ -611,10 +641,11 @@ XGB_DLL int XGDMatrixCreateFromDT(void** data, const char** feature_stypes,
|
||||
|
||||
std::unique_ptr<data::SimpleCSRSource> source(new data::SimpleCSRSource());
|
||||
data::SimpleCSRSource& mat = *source;
|
||||
mat.page_.offset.resize(1 + nrow);
|
||||
mat.page_.offset.Resize(1 + nrow);
|
||||
mat.info.num_row_ = nrow;
|
||||
mat.info.num_col_ = ncol;
|
||||
|
||||
auto& page_offset = mat.page_.offset.HostVector();
|
||||
#pragma omp parallel num_threads(nthread)
|
||||
{
|
||||
// Count elements per row, column by column
|
||||
@@ -624,15 +655,17 @@ XGB_DLL int XGDMatrixCreateFromDT(void** data, const char** feature_stypes,
|
||||
for (omp_ulong i = 0; i < nrow; ++i) {
|
||||
float val = DTGetValue(data[j], dtype, i);
|
||||
if (!std::isnan(val)) {
|
||||
mat.page_.offset[i + 1]++;
|
||||
page_offset[i + 1]++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// do cumulative sum (to avoid otherwise need to copy)
|
||||
PrefixSum(&mat.page_.offset[0], mat.page_.offset.size());
|
||||
PrefixSum(&page_offset[0], page_offset.size());
|
||||
|
||||
mat.page_.data.resize(mat.page_.data.size() + mat.page_.offset.back());
|
||||
mat.page_.data.Resize(mat.page_.data.Size() + page_offset.back());
|
||||
|
||||
auto& page_data = mat.page_.data.HostVector();
|
||||
|
||||
// Fill data matrix (now that know size, no need for slow push_back())
|
||||
std::vector<size_t> position(nrow);
|
||||
@@ -644,7 +677,7 @@ XGB_DLL int XGDMatrixCreateFromDT(void** data, const char** feature_stypes,
|
||||
for (omp_ulong i = 0; i < nrow; ++i) {
|
||||
float val = DTGetValue(data[j], dtype, i);
|
||||
if (!std::isnan(val)) {
|
||||
mat.page_.data[mat.page_.offset[i] + position[i]] = Entry(j, val);
|
||||
page_data[page_offset[i] + position[i]] = Entry(j, val);
|
||||
position[i]++;
|
||||
}
|
||||
}
|
||||
@@ -654,7 +687,7 @@ XGB_DLL int XGDMatrixCreateFromDT(void** data, const char** feature_stypes,
|
||||
// restore omp state
|
||||
omp_set_num_threads(nthread_orig);
|
||||
|
||||
mat.info.num_nonzero_ = mat.page_.data.size();
|
||||
mat.info.num_nonzero_ = mat.page_.data.Size();
|
||||
*out = new std::shared_ptr<DMatrix>(DMatrix::Create(std::move(source)));
|
||||
API_END();
|
||||
}
|
||||
@@ -682,24 +715,33 @@ XGB_DLL int XGDMatrixSliceDMatrix(DMatrixHandle handle,
|
||||
iter->BeforeFirst();
|
||||
CHECK(iter->Next());
|
||||
|
||||
const auto& batch = iter->Value();
|
||||
const auto& batch = iter->Value();
|
||||
const auto& src_labels = src.info.labels_.ConstHostVector();
|
||||
const auto& src_weights = src.info.weights_.ConstHostVector();
|
||||
const auto& src_base_margin = src.info.base_margin_.ConstHostVector();
|
||||
auto& ret_labels = ret.info.labels_.HostVector();
|
||||
auto& ret_weights = ret.info.weights_.HostVector();
|
||||
auto& ret_base_margin = ret.info.base_margin_.HostVector();
|
||||
auto& offset_vec = ret.page_.offset.HostVector();
|
||||
auto& data_vec = ret.page_.data.HostVector();
|
||||
|
||||
for (xgboost::bst_ulong i = 0; i < len; ++i) {
|
||||
const int ridx = idxset[i];
|
||||
auto inst = batch[ridx];
|
||||
CHECK_LT(static_cast<xgboost::bst_ulong>(ridx), batch.Size());
|
||||
ret.page_.data.insert(ret.page_.data.end(), inst.data,
|
||||
inst.data + inst.length);
|
||||
ret.page_.offset.push_back(ret.page_.offset.back() + inst.length);
|
||||
ret.info.num_nonzero_ += inst.length;
|
||||
data_vec.insert(data_vec.end(), inst.data(),
|
||||
inst.data() + inst.size());
|
||||
offset_vec.push_back(offset_vec.back() + inst.size());
|
||||
ret.info.num_nonzero_ += inst.size();
|
||||
|
||||
if (src.info.labels_.size() != 0) {
|
||||
ret.info.labels_.push_back(src.info.labels_[ridx]);
|
||||
if (src_labels.size() != 0) {
|
||||
ret_labels.push_back(src_labels[ridx]);
|
||||
}
|
||||
if (src.info.weights_.size() != 0) {
|
||||
ret.info.weights_.push_back(src.info.weights_[ridx]);
|
||||
if (src_weights.size() != 0) {
|
||||
ret_weights.push_back(src_weights[ridx]);
|
||||
}
|
||||
if (src.info.base_margin_.size() != 0) {
|
||||
ret.info.base_margin_.push_back(src.info.base_margin_[ridx]);
|
||||
if (src_base_margin.size() != 0) {
|
||||
ret_base_margin.push_back(src_base_margin[ridx]);
|
||||
}
|
||||
if (src.info.root_index_.size() != 0) {
|
||||
ret.info.root_index_.push_back(src.info.root_index_[ridx]);
|
||||
@@ -771,11 +813,11 @@ XGB_DLL int XGDMatrixGetFloatInfo(const DMatrixHandle handle,
|
||||
const MetaInfo& info = static_cast<std::shared_ptr<DMatrix>*>(handle)->get()->Info();
|
||||
const std::vector<bst_float>* vec = nullptr;
|
||||
if (!std::strcmp(field, "label")) {
|
||||
vec = &info.labels_;
|
||||
vec = &info.labels_.HostVector();
|
||||
} else if (!std::strcmp(field, "weight")) {
|
||||
vec = &info.weights_;
|
||||
vec = &info.weights_.HostVector();
|
||||
} else if (!std::strcmp(field, "base_margin")) {
|
||||
vec = &info.base_margin_;
|
||||
vec = &info.base_margin_.HostVector();
|
||||
} else {
|
||||
LOG(FATAL) << "Unknown float field name " << field;
|
||||
}
|
||||
@@ -1128,5 +1170,14 @@ XGB_DLL int XGBoosterSaveRabitCheckpoint(BoosterHandle handle) {
|
||||
API_END();
|
||||
}
|
||||
|
||||
/* hidden method; only known to C++ test suite */
|
||||
const std::map<std::string, std::string>&
|
||||
QueryBoosterConfigurationArguments(BoosterHandle handle) {
|
||||
CHECK_HANDLE();
|
||||
auto* bst = static_cast<Booster*>(handle);
|
||||
bst->LazyInit();
|
||||
return bst->learner()->GetConfigurationArguments();
|
||||
}
|
||||
|
||||
// force link rabit
|
||||
static DMLC_ATTRIBUTE_UNUSED int XGBOOST_LINK_RABIT_C_API_ = RabitLinkTag();
|
||||
|
||||
@@ -332,7 +332,7 @@ void CLIPredict(const CLIParam& param) {
|
||||
std::unique_ptr<dmlc::Stream> fo(
|
||||
dmlc::Stream::Create(param.name_pred.c_str(), "w"));
|
||||
dmlc::ostream os(fo.get());
|
||||
for (bst_float p : preds.HostVector()) {
|
||||
for (bst_float p : preds.ConstHostVector()) {
|
||||
os << std::setprecision(std::numeric_limits<bst_float>::max_digits10 + 2)
|
||||
<< p << '\n';
|
||||
}
|
||||
|
||||
@@ -1,287 +0,0 @@
|
||||
/*!
|
||||
* Copyright 2017 by Contributors
|
||||
* \author Rory Mitchell
|
||||
*/
|
||||
#pragma once
|
||||
#include <algorithm>
|
||||
#include "xgboost/base.h"
|
||||
|
||||
#ifdef XGBOOST_USE_AVX
|
||||
namespace avx {
|
||||
/**
|
||||
* \struct Float8
|
||||
*
|
||||
* \brief Helper class for processing a vector of eight floats using AVX
|
||||
* instructions. Implements basic math operators.
|
||||
*/
|
||||
|
||||
struct Float8 {
|
||||
__m256 x;
|
||||
explicit Float8(const __m256& x) : x(x) {}
|
||||
explicit Float8(const float& val) : x(_mm256_broadcast_ss(&val)) {}
|
||||
explicit Float8(const float* vec) : x(_mm256_loadu_ps(vec)) {}
|
||||
Float8() : x() {}
|
||||
Float8& operator+=(const Float8& rhs) {
|
||||
x = _mm256_add_ps(x, rhs.x);
|
||||
return *this;
|
||||
}
|
||||
Float8& operator-=(const Float8& rhs) {
|
||||
x = _mm256_sub_ps(x, rhs.x);
|
||||
return *this;
|
||||
}
|
||||
Float8& operator*=(const Float8& rhs) {
|
||||
x = _mm256_mul_ps(x, rhs.x);
|
||||
return *this;
|
||||
}
|
||||
Float8& operator/=(const Float8& rhs) {
|
||||
x = _mm256_div_ps(x, rhs.x);
|
||||
return *this;
|
||||
}
|
||||
void Print() {
|
||||
float* f = reinterpret_cast<float*>(&x);
|
||||
printf("%f %f %f %f %f %f %f %f\n", f[0], f[1], f[2], f[3], f[4], f[5],
|
||||
f[6], f[7]);
|
||||
}
|
||||
};
|
||||
|
||||
inline Float8 operator+(Float8 lhs, const Float8& rhs) {
|
||||
lhs += rhs;
|
||||
return lhs;
|
||||
}
|
||||
inline Float8 operator-(Float8 lhs, const Float8& rhs) {
|
||||
lhs -= rhs;
|
||||
return lhs;
|
||||
}
|
||||
inline Float8 operator*(Float8 lhs, const Float8& rhs) {
|
||||
lhs *= rhs;
|
||||
return lhs;
|
||||
}
|
||||
inline Float8 operator/(Float8 lhs, const Float8& rhs) {
|
||||
lhs /= rhs;
|
||||
return lhs;
|
||||
}
|
||||
|
||||
inline Float8 round(const Float8& x) {
|
||||
return Float8(_mm256_round_ps(x.x, _MM_FROUND_TO_NEAREST_INT));
|
||||
}
|
||||
} // namespace avx
|
||||
|
||||
// Overload std::max/min
|
||||
namespace std {
|
||||
inline avx::Float8 max(const avx::Float8& a, const avx::Float8& b) { // NOLINT
|
||||
return avx::Float8(_mm256_max_ps(a.x, b.x));
|
||||
}
|
||||
inline avx::Float8 min(const avx::Float8& a, const avx::Float8& b) { // NOLINT
|
||||
return avx::Float8(_mm256_min_ps(a.x, b.x));
|
||||
}
|
||||
} // namespace std
|
||||
|
||||
namespace avx {
|
||||
|
||||
// https://codingforspeed.com/using-faster-exponential-approximation/
|
||||
inline Float8 Exp4096(Float8 x) {
|
||||
x *= Float8(1.0f / 4096.0f);
|
||||
x += Float8(1.0f);
|
||||
x *= x;
|
||||
x *= x;
|
||||
x *= x;
|
||||
x *= x;
|
||||
x *= x;
|
||||
x *= x;
|
||||
x *= x;
|
||||
x *= x;
|
||||
x *= x;
|
||||
x *= x;
|
||||
x *= x;
|
||||
x *= x;
|
||||
return x;
|
||||
}
|
||||
|
||||
inline Float8 pow2n(Float8 const& n) {
|
||||
const float pow2_23 = 8388608.0; // 2^23
|
||||
const float bias = 127.0; // bias in exponent
|
||||
Float8 a =
|
||||
n + Float8(bias + pow2_23); // put n + bias in least significant bits
|
||||
__m256i b = _mm256_castps_si256(a.x);
|
||||
|
||||
// Do bit shift in SSE so we don't have to use AVX2 instructions
|
||||
__m128i c1 = _mm256_castsi256_si128(b);
|
||||
b = _mm256_permute2f128_si256(b, b, 1);
|
||||
__m128i c2 = _mm256_castsi256_si128(b);
|
||||
c1 = _mm_slli_epi32(c1, 23);
|
||||
c2 = _mm_slli_epi32(c2, 23);
|
||||
|
||||
__m256i c = _mm256_insertf128_si256(_mm256_castsi128_si256(c1), (c2), 0x1);
|
||||
return Float8(_mm256_castsi256_ps(c));
|
||||
}
|
||||
|
||||
inline Float8 polynomial_5(Float8 const& x, const float c0, const float c1,
|
||||
const float c2, const float c3, const float c4,
|
||||
const float c5) {
|
||||
// calculates polynomial c5*x^5 + c4*x^4 + c3*x^3 + c2*x^2 + c1*x + c0
|
||||
Float8 x2 = x * x;
|
||||
Float8 x4 = x2 * x2;
|
||||
return (Float8(c2) + Float8(c3) * x) * x2 +
|
||||
((Float8(c4) + Float8(c5) * x) * x4 + (Float8(c0) + Float8(c1) * x));
|
||||
}
|
||||
|
||||
// AVX exp Function based off Agner Fog's vector library
|
||||
// https://github.com/darealshinji/vectorclass/blob/master/vectormath_exp.h
|
||||
// Modified so it doesn't require AVX2 instructions
|
||||
// Clamps input values to the range -87.3f, +87.3f
|
||||
inline Float8 ExpAgner(Float8 x) {
|
||||
// Clamp input values
|
||||
float max_x = 87.3f;
|
||||
x = std::min(x, Float8(max_x));
|
||||
x = std::max(x, Float8(-max_x));
|
||||
|
||||
// 1/log(2)
|
||||
const float log2e = 1.44269504088896340736f;
|
||||
|
||||
// Taylor coefficients
|
||||
const float P0expf = 1.f / 2.f;
|
||||
const float P1expf = 1.f / 6.f;
|
||||
const float P2expf = 1.f / 24.f;
|
||||
const float P3expf = 1.f / 120.f;
|
||||
const float P4expf = 1.f / 720.f;
|
||||
const float P5expf = 1.f / 5040.f;
|
||||
|
||||
const float ln2f_hi = 0.693359375f;
|
||||
const float ln2f_lo = -2.12194440e-4f;
|
||||
|
||||
Float8 r = round(x * Float8(log2e));
|
||||
x -= r * Float8(ln2f_hi);
|
||||
x -= r * Float8(ln2f_lo);
|
||||
|
||||
Float8 x2 = x * x;
|
||||
Float8 z = polynomial_5(x, P0expf, P1expf, P2expf, P3expf, P4expf, P5expf);
|
||||
z *= x2;
|
||||
z += x;
|
||||
|
||||
// multiply by power of 2
|
||||
Float8 n2 = pow2n(r);
|
||||
|
||||
z = (z + Float8(1.0f)) * n2;
|
||||
return z;
|
||||
}
|
||||
|
||||
inline Float8 Sigmoid(Float8 x) {
|
||||
Float8 exp = ExpAgner(x * Float8(-1.0f));
|
||||
x = Float8(1.0f) + exp;
|
||||
return Float8(_mm256_rcp_ps(x.x));
|
||||
}
|
||||
|
||||
// Store 8 gradient pairs given vectors containing gradient and Hessian
|
||||
inline void StoreGpair(xgboost::GradientPair* dst, const Float8& grad,
|
||||
const Float8& hess) {
|
||||
float* ptr = reinterpret_cast<float*>(dst);
|
||||
__m256 gpair_low = _mm256_unpacklo_ps(grad.x, hess.x);
|
||||
__m256 gpair_high = _mm256_unpackhi_ps(grad.x, hess.x);
|
||||
_mm256_storeu_ps(ptr, _mm256_permute2f128_ps(gpair_low, gpair_high, 0x20));
|
||||
_mm256_storeu_ps(ptr + 8,
|
||||
_mm256_permute2f128_ps(gpair_low, gpair_high, 0x31));
|
||||
}
|
||||
} // namespace avx
|
||||
#else
|
||||
namespace avx {
|
||||
/**
|
||||
* \struct Float8
|
||||
*
|
||||
* \brief Fallback implementation not using AVX.
|
||||
*/
|
||||
|
||||
struct Float8 { // NOLINT
|
||||
float x[8];
|
||||
explicit Float8(const float& val) {
|
||||
for (float & i : x) {
|
||||
i = val;
|
||||
}
|
||||
}
|
||||
explicit Float8(const float* vec) {
|
||||
for (int i = 0; i < 8; i++) {
|
||||
x[i] = vec[i];
|
||||
}
|
||||
}
|
||||
Float8() = default;
|
||||
Float8& operator+=(const Float8& rhs) {
|
||||
for (int i = 0; i < 8; i++) {
|
||||
x[i] += rhs.x[i];
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
Float8& operator-=(const Float8& rhs) {
|
||||
for (int i = 0; i < 8; i++) {
|
||||
x[i] -= rhs.x[i];
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
Float8& operator*=(const Float8& rhs) {
|
||||
for (int i = 0; i < 8; i++) {
|
||||
x[i] *= rhs.x[i];
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
Float8& operator/=(const Float8& rhs) {
|
||||
for (int i = 0; i < 8; i++) {
|
||||
x[i] /= rhs.x[i];
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
void Print() {
|
||||
auto* f = reinterpret_cast<float*>(&x);
|
||||
printf("%f %f %f %f %f %f %f %f\n", f[0], f[1], f[2], f[3], f[4], f[5],
|
||||
f[6], f[7]);
|
||||
}
|
||||
};
|
||||
|
||||
inline Float8 operator+(Float8 lhs, const Float8& rhs) {
|
||||
lhs += rhs;
|
||||
return lhs;
|
||||
}
|
||||
inline Float8 operator-(Float8 lhs, const Float8& rhs) {
|
||||
lhs -= rhs;
|
||||
return lhs;
|
||||
}
|
||||
inline Float8 operator*(Float8 lhs, const Float8& rhs) {
|
||||
lhs *= rhs;
|
||||
return lhs;
|
||||
}
|
||||
inline Float8 operator/(Float8 lhs, const Float8& rhs) {
|
||||
lhs /= rhs;
|
||||
return lhs;
|
||||
}
|
||||
|
||||
// Store 8 gradient pairs given vectors containing gradient and Hessian
|
||||
inline void StoreGpair(xgboost::GradientPair* dst, const Float8& grad,
|
||||
const Float8& hess) {
|
||||
for (int i = 0; i < 8; i++) {
|
||||
dst[i] = xgboost::GradientPair(grad.x[i], hess.x[i]);
|
||||
}
|
||||
}
|
||||
|
||||
inline Float8 Sigmoid(Float8 x) {
|
||||
Float8 sig;
|
||||
for (int i = 0; i < 8; i++) {
|
||||
sig.x[i] = 1.0f / (1.0f + std::exp(-x.x[i]));
|
||||
}
|
||||
return sig;
|
||||
}
|
||||
} // namespace avx
|
||||
|
||||
namespace std {
|
||||
inline avx::Float8 max(const avx::Float8& a, const avx::Float8& b) { // NOLINT
|
||||
avx::Float8 max;
|
||||
for (int i = 0; i < 8; i++) {
|
||||
max.x[i] = std::max(a.x[i], b.x[i]);
|
||||
}
|
||||
return max;
|
||||
}
|
||||
inline avx::Float8 min(const avx::Float8& a, const avx::Float8& b) { // NOLINT
|
||||
avx::Float8 min;
|
||||
for (int i = 0; i < 8; i++) {
|
||||
min.x[i] = std::min(a.x[i], b.x[i]);
|
||||
}
|
||||
return min;
|
||||
}
|
||||
} // namespace std
|
||||
#endif
|
||||
@@ -1,9 +1,11 @@
|
||||
/*!
|
||||
* Copyright 2015 by Contributors
|
||||
* Copyright 2015-2018 by Contributors
|
||||
* \file common.cc
|
||||
* \brief Enable all kinds of global variables in common.
|
||||
*/
|
||||
#include <dmlc/thread_local.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "./random.h"
|
||||
|
||||
namespace xgboost {
|
||||
@@ -20,4 +22,11 @@ GlobalRandomEngine& GlobalRandom() {
|
||||
return RandomThreadLocalStore::Get()->engine;
|
||||
}
|
||||
} // namespace common
|
||||
|
||||
#if !defined(XGBOOST_USE_CUDA)
|
||||
int AllVisibleImpl::AllVisible() {
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
} // namespace xgboost
|
||||
|
||||
20
src/common/common.cu
Normal file
20
src/common/common.cu
Normal file
@@ -0,0 +1,20 @@
|
||||
/*!
|
||||
* Copyright 2018 XGBoost contributors
|
||||
*/
|
||||
#include "common.h"
|
||||
|
||||
namespace xgboost {
|
||||
|
||||
int AllVisibleImpl::AllVisible() {
|
||||
int n_visgpus = 0;
|
||||
try {
|
||||
// When compiled with CUDA but running on CPU only device,
|
||||
// cudaGetDeviceCount will fail.
|
||||
dh::safe_cuda(cudaGetDeviceCount(&n_visgpus));
|
||||
} catch(const dmlc::Error &except) {
|
||||
return 0;
|
||||
}
|
||||
return n_visgpus;
|
||||
}
|
||||
|
||||
} // namespace xgboost
|
||||
@@ -1,15 +1,52 @@
|
||||
/*!
|
||||
* Copyright 2015 by Contributors
|
||||
* Copyright 2015-2018 by Contributors
|
||||
* \file common.h
|
||||
* \brief Common utilities
|
||||
*/
|
||||
#ifndef XGBOOST_COMMON_COMMON_H_
|
||||
#define XGBOOST_COMMON_COMMON_H_
|
||||
|
||||
#include <xgboost/base.h>
|
||||
#include <xgboost/logging.h>
|
||||
|
||||
#include <exception>
|
||||
#include <limits>
|
||||
#include <type_traits>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
|
||||
#if defined(__CUDACC__)
|
||||
#include <thrust/system/cuda/error.h>
|
||||
#include <thrust/system_error.h>
|
||||
|
||||
#define WITH_CUDA() true
|
||||
|
||||
#else
|
||||
|
||||
#define WITH_CUDA() false
|
||||
|
||||
#endif
|
||||
|
||||
namespace dh {
|
||||
#if defined(__CUDACC__)
|
||||
/*
|
||||
* Error handling functions
|
||||
*/
|
||||
#define safe_cuda(ans) ThrowOnCudaError((ans), __FILE__, __LINE__)
|
||||
|
||||
inline cudaError_t ThrowOnCudaError(cudaError_t code, const char *file,
|
||||
int line) {
|
||||
if (code != cudaSuccess) {
|
||||
LOG(FATAL) << thrust::system_error(code, thrust::cuda_category(),
|
||||
std::string{file} + ": " + // NOLINT
|
||||
std::to_string(line)).what();
|
||||
}
|
||||
return code;
|
||||
}
|
||||
#endif
|
||||
} // namespace dh
|
||||
|
||||
namespace xgboost {
|
||||
namespace common {
|
||||
/*!
|
||||
@@ -35,6 +72,152 @@ inline std::string ToString(const T& data) {
|
||||
return os.str();
|
||||
}
|
||||
|
||||
/*
|
||||
* Range iterator
|
||||
*/
|
||||
class Range {
|
||||
public:
|
||||
using DifferenceType = int64_t;
|
||||
|
||||
class Iterator {
|
||||
friend class Range;
|
||||
|
||||
public:
|
||||
XGBOOST_DEVICE DifferenceType operator*() const { return i_; }
|
||||
XGBOOST_DEVICE const Iterator &operator++() {
|
||||
i_ += step_;
|
||||
return *this;
|
||||
}
|
||||
XGBOOST_DEVICE Iterator operator++(int) {
|
||||
Iterator res {*this};
|
||||
i_ += step_;
|
||||
return res;
|
||||
}
|
||||
|
||||
XGBOOST_DEVICE bool operator==(const Iterator &other) const {
|
||||
return i_ >= other.i_;
|
||||
}
|
||||
XGBOOST_DEVICE bool operator!=(const Iterator &other) const {
|
||||
return i_ < other.i_;
|
||||
}
|
||||
|
||||
XGBOOST_DEVICE void Step(DifferenceType s) { step_ = s; }
|
||||
|
||||
protected:
|
||||
XGBOOST_DEVICE explicit Iterator(DifferenceType start) : i_(start) {}
|
||||
XGBOOST_DEVICE explicit Iterator(DifferenceType start, DifferenceType step) :
|
||||
i_{start}, step_{step} {}
|
||||
|
||||
public:
|
||||
int64_t i_;
|
||||
DifferenceType step_ = 1;
|
||||
};
|
||||
|
||||
XGBOOST_DEVICE Iterator begin() const { return begin_; } // NOLINT
|
||||
XGBOOST_DEVICE Iterator end() const { return end_; } // NOLINT
|
||||
|
||||
XGBOOST_DEVICE Range(DifferenceType begin, DifferenceType end)
|
||||
: begin_(begin), end_(end) {}
|
||||
XGBOOST_DEVICE Range(DifferenceType begin, DifferenceType end,
|
||||
DifferenceType step)
|
||||
: begin_(begin, step), end_(end) {}
|
||||
|
||||
XGBOOST_DEVICE bool operator==(const Range& other) const {
|
||||
return *begin_ == *other.begin_ && *end_ == *other.end_;
|
||||
}
|
||||
XGBOOST_DEVICE bool operator!=(const Range& other) const {
|
||||
return !(*this == other);
|
||||
}
|
||||
|
||||
XGBOOST_DEVICE void Step(DifferenceType s) { begin_.Step(s); }
|
||||
|
||||
private:
|
||||
Iterator begin_;
|
||||
Iterator end_;
|
||||
};
|
||||
|
||||
} // namespace common
|
||||
struct AllVisibleImpl {
|
||||
static int AllVisible();
|
||||
};
|
||||
/* \brief set of devices across which HostDeviceVector can be distributed.
|
||||
*
|
||||
* Currently implemented as a range, but can be changed later to something else,
|
||||
* e.g. a bitset
|
||||
*/
|
||||
class GPUSet {
|
||||
public:
|
||||
explicit GPUSet(int start = 0, int ndevices = 0)
|
||||
: devices_(start, start + ndevices) {}
|
||||
|
||||
static GPUSet Empty() { return GPUSet(); }
|
||||
|
||||
static GPUSet Range(int start, int ndevices) {
|
||||
return ndevices <= 0 ? Empty() : GPUSet{start, ndevices};
|
||||
}
|
||||
/*! \brief ndevices and num_rows both are upper bounds. */
|
||||
static GPUSet All(int ndevices, int num_rows = std::numeric_limits<int>::max()) {
|
||||
int n_devices_visible = AllVisible().Size();
|
||||
if (ndevices < 0 || ndevices > n_devices_visible) {
|
||||
ndevices = n_devices_visible;
|
||||
}
|
||||
// fix-up device number to be limited by number of rows
|
||||
ndevices = ndevices > num_rows ? num_rows : ndevices;
|
||||
return Range(0, ndevices);
|
||||
}
|
||||
static GPUSet AllVisible() {
|
||||
int n = AllVisibleImpl::AllVisible();
|
||||
return Range(0, n);
|
||||
}
|
||||
/*! \brief Ensure gpu_id is correct, so not dependent upon user knowing details */
|
||||
static int GetDeviceIdx(int gpu_id) {
|
||||
auto devices = AllVisible();
|
||||
CHECK(!devices.IsEmpty()) << "Empty device.";
|
||||
return (std::abs(gpu_id) + 0) % devices.Size();
|
||||
}
|
||||
/*! \brief Counting from gpu_id */
|
||||
GPUSet Normalised(int gpu_id) const {
|
||||
return Range(gpu_id, Size());
|
||||
}
|
||||
/*! \brief Counting from 0 */
|
||||
GPUSet Unnormalised() const {
|
||||
return Range(0, Size());
|
||||
}
|
||||
|
||||
int Size() const {
|
||||
int res = *devices_.end() - *devices_.begin();
|
||||
return res < 0 ? 0 : res;
|
||||
}
|
||||
/*! \brief Get normalised device id. */
|
||||
int operator[](int index) const {
|
||||
CHECK(index >= 0 && index < Size());
|
||||
return *devices_.begin() + index;
|
||||
}
|
||||
|
||||
bool IsEmpty() const { return Size() == 0; }
|
||||
/*! \brief Get un-normalised index. */
|
||||
int Index(int device) const {
|
||||
CHECK(Contains(device));
|
||||
return device - *devices_.begin();
|
||||
}
|
||||
|
||||
bool Contains(int device) const {
|
||||
return *devices_.begin() <= device && device < *devices_.end();
|
||||
}
|
||||
|
||||
common::Range::Iterator begin() const { return devices_.begin(); } // NOLINT
|
||||
common::Range::Iterator end() const { return devices_.end(); } // NOLINT
|
||||
|
||||
friend bool operator==(const GPUSet& lhs, const GPUSet& rhs) {
|
||||
return lhs.devices_ == rhs.devices_;
|
||||
}
|
||||
friend bool operator!=(const GPUSet& lhs, const GPUSet& rhs) {
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
private:
|
||||
common::Range devices_;
|
||||
};
|
||||
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_COMMON_COMMON_H_
|
||||
|
||||
@@ -7,6 +7,10 @@
|
||||
#include <thrust/system/cuda/error.h>
|
||||
#include <thrust/system_error.h>
|
||||
#include <xgboost/logging.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "span.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <chrono>
|
||||
#include <ctime>
|
||||
@@ -28,25 +32,6 @@ namespace dh {
|
||||
#define HOST_DEV_INLINE XGBOOST_DEVICE __forceinline__
|
||||
#define DEV_INLINE __device__ __forceinline__
|
||||
|
||||
/*
|
||||
* Error handling functions
|
||||
*/
|
||||
|
||||
#define safe_cuda(ans) ThrowOnCudaError((ans), __FILE__, __LINE__)
|
||||
|
||||
inline cudaError_t ThrowOnCudaError(cudaError_t code, const char *file,
|
||||
int line) {
|
||||
if (code != cudaSuccess) {
|
||||
std::stringstream ss;
|
||||
ss << file << "(" << line << ")";
|
||||
std::string file_and_line;
|
||||
ss >> file_and_line;
|
||||
throw thrust::system_error(code, thrust::cuda_category(), file_and_line);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
#ifdef XGBOOST_USE_NCCL
|
||||
#define safe_nccl(ans) ThrowOnNcclError((ans), __FILE__, __LINE__)
|
||||
|
||||
@@ -73,45 +58,20 @@ const T *Raw(const thrust::device_vector<T> &v) { // NOLINT
|
||||
return raw_pointer_cast(v.data());
|
||||
}
|
||||
|
||||
inline int NVisibleDevices() {
|
||||
int n_visgpus = 0;
|
||||
|
||||
dh::safe_cuda(cudaGetDeviceCount(&n_visgpus));
|
||||
|
||||
return n_visgpus;
|
||||
}
|
||||
|
||||
inline int NDevicesAll(int n_gpus) {
|
||||
int n_devices_visible = dh::NVisibleDevices();
|
||||
int n_devices = n_gpus < 0 ? n_devices_visible : n_gpus;
|
||||
return (n_devices);
|
||||
}
|
||||
inline int NDevices(int n_gpus, int num_rows) {
|
||||
int n_devices = dh::NDevicesAll(n_gpus);
|
||||
// fix-up device number to be limited by number of rows
|
||||
n_devices = n_devices > num_rows ? num_rows : n_devices;
|
||||
return (n_devices);
|
||||
}
|
||||
|
||||
// if n_devices=-1, then use all visible devices
|
||||
inline void SynchronizeNDevices(int n_devices, std::vector<int> dList) {
|
||||
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
|
||||
int device_idx = dList[d_idx];
|
||||
safe_cuda(cudaSetDevice(device_idx));
|
||||
safe_cuda(cudaDeviceSynchronize());
|
||||
}
|
||||
}
|
||||
inline void SynchronizeAll() {
|
||||
for (int device_idx = 0; device_idx < NVisibleDevices(); device_idx++) {
|
||||
safe_cuda(cudaSetDevice(device_idx));
|
||||
inline void SynchronizeNDevices(xgboost::GPUSet devices) {
|
||||
devices = devices.IsEmpty() ? xgboost::GPUSet::AllVisible() : devices;
|
||||
for (auto const d : devices.Unnormalised()) {
|
||||
safe_cuda(cudaSetDevice(d));
|
||||
safe_cuda(cudaDeviceSynchronize());
|
||||
}
|
||||
}
|
||||
|
||||
inline std::string DeviceName(int device_idx) {
|
||||
cudaDeviceProp prop;
|
||||
dh::safe_cuda(cudaGetDeviceProperties(&prop, device_idx));
|
||||
return std::string(prop.name);
|
||||
inline void SynchronizeAll() {
|
||||
for (int device_idx : xgboost::GPUSet::AllVisible()) {
|
||||
safe_cuda(cudaSetDevice(device_idx));
|
||||
safe_cuda(cudaDeviceSynchronize());
|
||||
}
|
||||
}
|
||||
|
||||
inline size_t AvailableMemory(int device_idx) {
|
||||
@@ -144,31 +104,23 @@ inline size_t MaxSharedMemory(int device_idx) {
|
||||
return prop.sharedMemPerBlock;
|
||||
}
|
||||
|
||||
// ensure gpu_id is correct, so not dependent upon user knowing details
|
||||
inline int GetDeviceIdx(int gpu_id) {
|
||||
// protect against overrun for gpu_id
|
||||
return (std::abs(gpu_id) + 0) % dh::NVisibleDevices();
|
||||
}
|
||||
|
||||
inline void CheckComputeCapability() {
|
||||
int n_devices = NVisibleDevices();
|
||||
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
|
||||
for (int d_idx : xgboost::GPUSet::AllVisible()) {
|
||||
cudaDeviceProp prop;
|
||||
safe_cuda(cudaGetDeviceProperties(&prop, d_idx));
|
||||
std::ostringstream oss;
|
||||
oss << "CUDA Capability Major/Minor version number: " << prop.major << "."
|
||||
<< prop.minor << " is insufficient. Need >=3.5";
|
||||
int failed = prop.major < 3 || prop.major == 3 && prop.minor < 5;
|
||||
int failed = prop.major < 3 || (prop.major == 3 && prop.minor < 5);
|
||||
if (failed) LOG(WARNING) << oss.str() << " for device: " << d_idx;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
DEV_INLINE void AtomicOrByte(unsigned int* __restrict__ buffer, size_t ibyte, unsigned char b) {
|
||||
atomicOr(&buffer[ibyte / sizeof(unsigned int)], (unsigned int)b << (ibyte % (sizeof(unsigned int)) * 8));
|
||||
}
|
||||
|
||||
/*!
|
||||
/*!
|
||||
* \brief Find the strict upper bound for an element in a sorted array
|
||||
* using binary search.
|
||||
* \param cuts pointer to the first element of the sorted array
|
||||
@@ -178,15 +130,10 @@ DEV_INLINE void AtomicOrByte(unsigned int* __restrict__ buffer, size_t ibyte, un
|
||||
* than all elements of the array
|
||||
*/
|
||||
DEV_INLINE int UpperBound(const float* __restrict__ cuts, int n, float v) {
|
||||
if (n == 0) {
|
||||
return 0;
|
||||
}
|
||||
if (cuts[n - 1] <= v) {
|
||||
return n;
|
||||
}
|
||||
if (cuts[0] > v) {
|
||||
return 0;
|
||||
}
|
||||
if (n == 0) { return 0; }
|
||||
if (cuts[n - 1] <= v) { return n; }
|
||||
if (cuts[0] > v) { return 0; }
|
||||
|
||||
int left = 0, right = n - 1;
|
||||
while (right - left > 1) {
|
||||
int middle = left + (right - left) / 2;
|
||||
@@ -194,72 +141,23 @@ DEV_INLINE int UpperBound(const float* __restrict__ cuts, int n, float v) {
|
||||
right = middle;
|
||||
} else {
|
||||
left = middle;
|
||||
}
|
||||
}
|
||||
}
|
||||
return right;
|
||||
}
|
||||
|
||||
/*
|
||||
* Range iterator
|
||||
*/
|
||||
|
||||
class Range {
|
||||
public:
|
||||
class Iterator {
|
||||
friend class Range;
|
||||
|
||||
public:
|
||||
XGBOOST_DEVICE int64_t operator*() const { return i_; }
|
||||
XGBOOST_DEVICE const Iterator &operator++() {
|
||||
i_ += step_;
|
||||
return *this;
|
||||
}
|
||||
XGBOOST_DEVICE Iterator operator++(int) {
|
||||
Iterator copy(*this);
|
||||
i_ += step_;
|
||||
return copy;
|
||||
}
|
||||
|
||||
XGBOOST_DEVICE bool operator==(const Iterator &other) const {
|
||||
return i_ >= other.i_;
|
||||
}
|
||||
XGBOOST_DEVICE bool operator!=(const Iterator &other) const {
|
||||
return i_ < other.i_;
|
||||
}
|
||||
|
||||
XGBOOST_DEVICE void Step(int s) { step_ = s; }
|
||||
|
||||
protected:
|
||||
XGBOOST_DEVICE explicit Iterator(int64_t start) : i_(start) {}
|
||||
|
||||
public:
|
||||
uint64_t i_;
|
||||
int step_ = 1;
|
||||
};
|
||||
|
||||
XGBOOST_DEVICE Iterator begin() const { return begin_; } // NOLINT
|
||||
XGBOOST_DEVICE Iterator end() const { return end_; } // NOLINT
|
||||
XGBOOST_DEVICE Range(int64_t begin, int64_t end)
|
||||
: begin_(begin), end_(end) {}
|
||||
XGBOOST_DEVICE void Step(int s) { begin_.Step(s); }
|
||||
|
||||
private:
|
||||
Iterator begin_;
|
||||
Iterator end_;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
__device__ Range GridStrideRange(T begin, T end) {
|
||||
__device__ xgboost::common::Range GridStrideRange(T begin, T end) {
|
||||
begin += blockDim.x * blockIdx.x + threadIdx.x;
|
||||
Range r(begin, end);
|
||||
xgboost::common::Range r(begin, end);
|
||||
r.Step(gridDim.x * blockDim.x);
|
||||
return r;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
__device__ Range BlockStrideRange(T begin, T end) {
|
||||
__device__ xgboost::common::Range BlockStrideRange(T begin, T end) {
|
||||
begin += threadIdx.x;
|
||||
Range r(begin, end);
|
||||
xgboost::common::Range r(begin, end);
|
||||
r.Step(blockDim.x);
|
||||
return r;
|
||||
}
|
||||
@@ -282,18 +180,6 @@ T1 DivRoundUp(const T1 a, const T2 b) {
|
||||
return static_cast<T1>(ceil(static_cast<double>(a) / b));
|
||||
}
|
||||
|
||||
inline void RowSegments(size_t n_rows, size_t n_devices, std::vector<size_t>* segments) {
|
||||
segments->push_back(0);
|
||||
size_t row_begin = 0;
|
||||
size_t shard_size = DivRoundUp(n_rows, n_devices);
|
||||
for (size_t d_idx = 0; d_idx < n_devices; ++d_idx) {
|
||||
size_t row_end = std::min(row_begin + shard_size, n_rows);
|
||||
segments->push_back(row_end);
|
||||
row_begin = row_end;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template <typename L>
|
||||
__global__ void LaunchNKernel(size_t begin, size_t end, L lambda) {
|
||||
for (auto i : GridStrideRange(begin, end)) {
|
||||
@@ -318,7 +204,7 @@ inline void LaunchN(int device_idx, size_t n, L lambda) {
|
||||
const int GRID_SIZE =
|
||||
static_cast<int>(DivRoundUp(n, ITEMS_PER_THREAD * BLOCK_THREADS));
|
||||
LaunchNKernel<<<GRID_SIZE, BLOCK_THREADS>>>(static_cast<size_t>(0), n,
|
||||
lambda);
|
||||
lambda);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -420,8 +306,8 @@ class DVec {
|
||||
void copy(IterT begin, IterT end) {
|
||||
safe_cuda(cudaSetDevice(this->DeviceIdx()));
|
||||
if (end - begin != Size()) {
|
||||
throw std::runtime_error(
|
||||
"Cannot copy assign vector to DVec, sizes are different");
|
||||
LOG(FATAL) << "Cannot copy assign vector to DVec, sizes are different" <<
|
||||
" vector::Size(): " << end - begin << " DVec::Size(): " << Size();
|
||||
}
|
||||
thrust::copy(begin, end, this->tbegin());
|
||||
}
|
||||
@@ -479,6 +365,7 @@ class DVec2 {
|
||||
T *other() { return buff_.Alternate(); }
|
||||
};
|
||||
|
||||
/*! \brief Helper for allocating large block of memory. */
|
||||
template <MemoryType MemoryT>
|
||||
class BulkAllocator {
|
||||
std::vector<char *> d_ptr_;
|
||||
@@ -557,7 +444,7 @@ class BulkAllocator {
|
||||
BulkAllocator(BulkAllocator<MemoryT>&&) = delete;
|
||||
void operator=(const BulkAllocator<MemoryT>&) = delete;
|
||||
void operator=(BulkAllocator<MemoryT>&&) = delete;
|
||||
|
||||
|
||||
~BulkAllocator() {
|
||||
for (size_t i = 0; i < d_ptr_.size(); i++) {
|
||||
if (!(d_ptr_[i] == nullptr)) {
|
||||
@@ -1059,6 +946,29 @@ class AllReducer {
|
||||
}
|
||||
};
|
||||
|
||||
class SaveCudaContext {
|
||||
private:
|
||||
int saved_device_;
|
||||
|
||||
public:
|
||||
template <typename Functor>
|
||||
explicit SaveCudaContext (Functor func) : saved_device_{-1} {
|
||||
// When compiled with CUDA but running on CPU only device,
|
||||
// cudaGetDevice will fail.
|
||||
try {
|
||||
safe_cuda(cudaGetDevice(&saved_device_));
|
||||
} catch (const dmlc::Error &except) {
|
||||
saved_device_ = -1;
|
||||
}
|
||||
func();
|
||||
}
|
||||
~SaveCudaContext() {
|
||||
if (saved_device_ != -1) {
|
||||
safe_cuda(cudaSetDevice(saved_device_));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* \brief Executes some operation on each element of the input vector, using a
|
||||
* single controlling thread for each element.
|
||||
@@ -1071,10 +981,13 @@ class AllReducer {
|
||||
|
||||
template <typename T, typename FunctionT>
|
||||
void ExecuteShards(std::vector<T> *shards, FunctionT f) {
|
||||
SaveCudaContext {
|
||||
[&](){
|
||||
#pragma omp parallel for schedule(static, 1) if (shards->size() > 1)
|
||||
for (int shard = 0; shard < shards->size(); ++shard) {
|
||||
f(shards->at(shard));
|
||||
}
|
||||
for (int shard = 0; shard < shards->size(); ++shard) {
|
||||
f(shards->at(shard));
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1090,10 +1003,13 @@ void ExecuteShards(std::vector<T> *shards, FunctionT f) {
|
||||
|
||||
template <typename T, typename FunctionT>
|
||||
void ExecuteIndexShards(std::vector<T> *shards, FunctionT f) {
|
||||
SaveCudaContext {
|
||||
[&](){
|
||||
#pragma omp parallel for schedule(static, 1) if (shards->size() > 1)
|
||||
for (int shard = 0; shard < shards->size(); ++shard) {
|
||||
f(shard, shards->at(shard));
|
||||
}
|
||||
for (int shard = 0; shard < shards->size(); ++shard) {
|
||||
f(shard, shards->at(shard));
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1109,13 +1025,34 @@ void ExecuteIndexShards(std::vector<T> *shards, FunctionT f) {
|
||||
* \return A reduce_t.
|
||||
*/
|
||||
|
||||
template <typename ReduceT,typename T, typename FunctionT>
|
||||
ReduceT ReduceShards(std::vector<T> *shards, FunctionT f) {
|
||||
template <typename ReduceT, typename ShardT, typename FunctionT>
|
||||
ReduceT ReduceShards(std::vector<ShardT> *shards, FunctionT f) {
|
||||
std::vector<ReduceT> sums(shards->size());
|
||||
SaveCudaContext {
|
||||
[&](){
|
||||
#pragma omp parallel for schedule(static, 1) if (shards->size() > 1)
|
||||
for (int shard = 0; shard < shards->size(); ++shard) {
|
||||
sums[shard] = f(shards->at(shard));
|
||||
}
|
||||
for (int shard = 0; shard < shards->size(); ++shard) {
|
||||
sums[shard] = f(shards->at(shard));
|
||||
}}
|
||||
};
|
||||
return std::accumulate(sums.begin(), sums.end(), ReduceT());
|
||||
}
|
||||
|
||||
template <typename T,
|
||||
typename IndexT = typename xgboost::common::Span<T>::index_type>
|
||||
xgboost::common::Span<T> ToSpan(
|
||||
thrust::device_vector<T>& vec,
|
||||
IndexT offset = 0,
|
||||
IndexT size = -1) {
|
||||
size = size == -1 ? vec.size() : size;
|
||||
CHECK_LE(offset + size, vec.size());
|
||||
return {vec.data().get() + offset, static_cast<IndexT>(size)};
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
xgboost::common::Span<T> ToSpan(thrust::device_vector<T>& vec,
|
||||
size_t offset, size_t size) {
|
||||
using IndexT = typename xgboost::common::Span<T>::index_type;
|
||||
return ToSpan(vec, static_cast<IndexT>(offset), static_cast<IndexT>(size));
|
||||
}
|
||||
} // namespace dh
|
||||
|
||||
81
src/common/enum_class_param.h
Normal file
81
src/common/enum_class_param.h
Normal file
@@ -0,0 +1,81 @@
|
||||
/*!
|
||||
* Copyright 2018 by Contributors
|
||||
* \file enum_class_param.h
|
||||
* \brief macro for using C++11 enum class as DMLC parameter
|
||||
* \author Hyunsu Philip Cho
|
||||
*/
|
||||
|
||||
#ifndef XGBOOST_COMMON_ENUM_CLASS_PARAM_H_
|
||||
#define XGBOOST_COMMON_ENUM_CLASS_PARAM_H_
|
||||
|
||||
#include <dmlc/parameter.h>
|
||||
#include <string>
|
||||
#include <type_traits>
|
||||
|
||||
/*!
|
||||
* \brief Specialization of FieldEntry for enum class (backed by int)
|
||||
*
|
||||
* Use this macro to use C++11 enum class as DMLC parameters
|
||||
*
|
||||
* Usage:
|
||||
*
|
||||
* \code{.cpp}
|
||||
*
|
||||
* // enum class must inherit from int type
|
||||
* enum class Foo : int {
|
||||
* kBar = 0, kFrog = 1, kCat = 2, kDog = 3
|
||||
* };
|
||||
*
|
||||
* // This line is needed to prevent compilation error
|
||||
* DECLARE_FIELD_ENUM_CLASS(Foo);
|
||||
*
|
||||
* // Now define DMLC parameter as usual;
|
||||
* // enum classes can now be members.
|
||||
* struct MyParam : dmlc::Parameter<MyParam> {
|
||||
* Foo foo;
|
||||
* DMLC_DECLARE_PARAMETER(MyParam) {
|
||||
* DMLC_DECLARE_FIELD(foo)
|
||||
* .set_default(Foo::kBar)
|
||||
* .add_enum("bar", Foo::kBar)
|
||||
* .add_enum("frog", Foo::kFrog)
|
||||
* .add_enum("cat", Foo::kCat)
|
||||
* .add_enum("dog", Foo::kDog);
|
||||
* }
|
||||
* };
|
||||
*
|
||||
* DMLC_REGISTER_PARAMETER(MyParam);
|
||||
* \endcode
|
||||
*/
|
||||
#define DECLARE_FIELD_ENUM_CLASS(EnumClass) \
|
||||
namespace dmlc { \
|
||||
namespace parameter { \
|
||||
template <> \
|
||||
class FieldEntry<EnumClass> : public FieldEntry<int> { \
|
||||
public: \
|
||||
FieldEntry<EnumClass>() { \
|
||||
static_assert( \
|
||||
std::is_same<int, typename std::underlying_type<EnumClass>::type>::value, \
|
||||
"enum class must be backed by int"); \
|
||||
is_enum_ = true; \
|
||||
} \
|
||||
using Super = FieldEntry<int>; \
|
||||
void Set(void *head, const std::string &value) const override { \
|
||||
Super::Set(head, value); \
|
||||
} \
|
||||
inline FieldEntry<EnumClass>& add_enum(const std::string &key, EnumClass value) { \
|
||||
Super::add_enum(key, static_cast<int>(value)); \
|
||||
return *this; \
|
||||
} \
|
||||
inline FieldEntry<EnumClass>& set_default(const EnumClass& default_value) { \
|
||||
default_value_ = static_cast<int>(default_value); \
|
||||
has_default_ = true; \
|
||||
return *this; \
|
||||
} \
|
||||
inline void Init(const std::string &key, void *head, EnumClass& ref) { /* NOLINT */ \
|
||||
Super::Init(key, head, *reinterpret_cast<int*>(&ref)); \
|
||||
} \
|
||||
}; \
|
||||
} /* namespace parameter */ \
|
||||
} /* namespace dmlc */
|
||||
|
||||
#endif // XGBOOST_COMMON_ENUM_CLASS_PARAM_H_
|
||||
@@ -17,7 +17,6 @@ namespace xgboost {
|
||||
namespace common {
|
||||
|
||||
void HistCutMatrix::Init(DMatrix* p_fmat, uint32_t max_num_bins) {
|
||||
using WXQSketch = common::WXQuantileSketch<bst_float, bst_float>;
|
||||
const MetaInfo& info = p_fmat->Info();
|
||||
|
||||
// safe factor for better accuracy
|
||||
@@ -33,10 +32,8 @@ void HistCutMatrix::Init(DMatrix* p_fmat, uint32_t max_num_bins) {
|
||||
s.Init(info.num_row_, 1.0 / (max_num_bins * kFactor));
|
||||
}
|
||||
|
||||
auto iter = p_fmat->RowIterator();
|
||||
iter->BeforeFirst();
|
||||
while (iter->Next()) {
|
||||
auto &batch = iter->Value();
|
||||
const auto& weights = info.weights_.HostVector();
|
||||
for (const auto &batch : p_fmat->GetRowBatches()) {
|
||||
#pragma omp parallel num_threads(nthread)
|
||||
{
|
||||
CHECK_EQ(nthread, omp_get_num_threads());
|
||||
@@ -48,9 +45,10 @@ void HistCutMatrix::Init(DMatrix* p_fmat, uint32_t max_num_bins) {
|
||||
for (size_t i = 0; i < batch.Size(); ++i) { // NOLINT(*)
|
||||
size_t ridx = batch.base_rowid + i;
|
||||
SparsePage::Inst inst = batch[i];
|
||||
for (bst_uint j = 0; j < inst.length; ++j) {
|
||||
if (inst[j].index >= begin && inst[j].index < end) {
|
||||
sketchs[inst[j].index].Push(inst[j].fvalue, info.GetWeight(ridx));
|
||||
for (auto& ins : inst) {
|
||||
if (ins.index >= begin && ins.index < end) {
|
||||
sketchs[ins.index].Push(ins.fvalue,
|
||||
weights.size() > 0 ? weights[ridx] : 1.0f);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -127,20 +125,17 @@ uint32_t HistCutMatrix::GetBinIdx(const Entry& e) {
|
||||
|
||||
void GHistIndexMatrix::Init(DMatrix* p_fmat, int max_num_bins) {
|
||||
cut.Init(p_fmat, max_num_bins);
|
||||
auto iter = p_fmat->RowIterator();
|
||||
|
||||
const int nthread = omp_get_max_threads();
|
||||
const uint32_t nbins = cut.row_ptr.back();
|
||||
hit_count.resize(nbins, 0);
|
||||
hit_count_tloc_.resize(nthread * nbins, 0);
|
||||
|
||||
iter->BeforeFirst();
|
||||
row_ptr.push_back(0);
|
||||
while (iter->Next()) {
|
||||
auto &batch = iter->Value();
|
||||
for (const auto &batch : p_fmat->GetRowBatches()) {
|
||||
const size_t rbegin = row_ptr.size() - 1;
|
||||
for (size_t i = 0; i < batch.Size(); ++i) {
|
||||
row_ptr.push_back(batch[i].length + row_ptr.back());
|
||||
row_ptr.push_back(batch[i].size() + row_ptr.back());
|
||||
}
|
||||
index.resize(row_ptr.back());
|
||||
|
||||
@@ -154,9 +149,11 @@ void GHistIndexMatrix::Init(DMatrix* p_fmat, int max_num_bins) {
|
||||
size_t ibegin = row_ptr[rbegin + i];
|
||||
size_t iend = row_ptr[rbegin + i + 1];
|
||||
SparsePage::Inst inst = batch[i];
|
||||
CHECK_EQ(ibegin + inst.length, iend);
|
||||
for (bst_uint j = 0; j < inst.length; ++j) {
|
||||
|
||||
CHECK_EQ(ibegin + inst.size(), iend);
|
||||
for (bst_uint j = 0; j < inst.size(); ++j) {
|
||||
uint32_t idx = cut.GetBinIdx(inst[j]);
|
||||
|
||||
index[ibegin + j] = idx;
|
||||
++hit_count_tloc_[tid * nbins + idx];
|
||||
}
|
||||
@@ -400,7 +397,6 @@ void GHistIndexBlockMatrix::Init(const GHistIndexMatrix& gmat,
|
||||
void GHistBuilder::BuildHist(const std::vector<GradientPair>& gpair,
|
||||
const RowSetCollection::Elem row_indices,
|
||||
const GHistIndexMatrix& gmat,
|
||||
const std::vector<bst_uint>& feat_set,
|
||||
GHistRow hist) {
|
||||
data_.resize(nbins_ * nthread_, GHistEntry());
|
||||
std::fill(data_.begin(), data_.end(), GHistEntry());
|
||||
@@ -459,7 +455,6 @@ void GHistBuilder::BuildHist(const std::vector<GradientPair>& gpair,
|
||||
void GHistBuilder::BuildBlockHist(const std::vector<GradientPair>& gpair,
|
||||
const RowSetCollection::Elem row_indices,
|
||||
const GHistIndexBlockMatrix& gmatb,
|
||||
const std::vector<bst_uint>& feat_set,
|
||||
GHistRow hist) {
|
||||
constexpr int kUnroll = 8; // loop unrolling factor
|
||||
const size_t nblock = gmatb.GetNumBlock();
|
||||
|
||||
@@ -118,7 +118,7 @@ struct GPUSketcher {
|
||||
|
||||
void Init(const SparsePage& row_batch, const MetaInfo& info) {
|
||||
num_cols_ = info.num_col_;
|
||||
has_weights_ = info.weights_.size() > 0;
|
||||
has_weights_ = info.weights_.Size() > 0;
|
||||
|
||||
// find the batch size
|
||||
if (param_.gpu_batch_nrows == 0) {
|
||||
@@ -257,13 +257,13 @@ struct GPUSketcher {
|
||||
n_cuts_cur_[icol] = std::min(n_cuts_, n_unique);
|
||||
// if less elements than cuts: copy all elements with their weights
|
||||
if (n_cuts_ > n_unique) {
|
||||
auto weights2_iter = weights2_.begin();
|
||||
auto fvalues_iter = fvalues_cur_.begin();
|
||||
auto cuts_iter = cuts_d_.begin() + icol * n_cuts_;
|
||||
float* weights2_ptr = weights2_.data().get();
|
||||
float* fvalues_ptr = fvalues_cur_.data().get();
|
||||
WXQSketch::Entry* cuts_ptr = cuts_d_.data().get() + icol * n_cuts_;
|
||||
dh::LaunchN(device_, n_unique, [=]__device__(size_t i) {
|
||||
bst_float rmax = weights2_iter[i];
|
||||
bst_float rmin = i > 0 ? weights2_iter[i - 1] : 0;
|
||||
cuts_iter[i] = WXQSketch::Entry(rmin, rmax, rmax - rmin, fvalues_iter[i]);
|
||||
bst_float rmax = weights2_ptr[i];
|
||||
bst_float rmin = i > 0 ? weights2_ptr[i - 1] : 0;
|
||||
cuts_ptr[i] = WXQSketch::Entry(rmin, rmax, rmax - rmin, fvalues_ptr[i]);
|
||||
});
|
||||
} else if (n_cuts_cur_[icol] > 0) {
|
||||
// if more elements than cuts: use binary search on cumulative weights
|
||||
@@ -271,7 +271,7 @@ struct GPUSketcher {
|
||||
find_cuts_k<<<dh::DivRoundUp(n_cuts_cur_[icol], block), block>>>
|
||||
(cuts_d_.data().get() + icol * n_cuts_, fvalues_cur_.data().get(),
|
||||
weights2_.data().get(), n_unique, n_cuts_cur_[icol]);
|
||||
dh::safe_cuda(cudaGetLastError());
|
||||
dh::safe_cuda(cudaGetLastError()); // NOLINT
|
||||
}
|
||||
}
|
||||
|
||||
@@ -282,19 +282,23 @@ struct GPUSketcher {
|
||||
size_t batch_row_end = std::min((gpu_batch + 1) * gpu_batch_nrows_,
|
||||
static_cast<size_t>(n_rows_));
|
||||
size_t batch_nrows = batch_row_end - batch_row_begin;
|
||||
size_t n_entries =
|
||||
row_batch.offset[row_begin_ + batch_row_end] -
|
||||
row_batch.offset[row_begin_ + batch_row_begin];
|
||||
|
||||
const auto& offset_vec = row_batch.offset.HostVector();
|
||||
const auto& data_vec = row_batch.data.HostVector();
|
||||
|
||||
size_t n_entries = offset_vec[row_begin_ + batch_row_end] -
|
||||
offset_vec[row_begin_ + batch_row_begin];
|
||||
// copy the batch to the GPU
|
||||
dh::safe_cuda
|
||||
(cudaMemcpy(entries_.data().get(),
|
||||
&row_batch.data[row_batch.offset[row_begin_ + batch_row_begin]],
|
||||
data_vec.data() + offset_vec[row_begin_ + batch_row_begin],
|
||||
n_entries * sizeof(Entry), cudaMemcpyDefault));
|
||||
// copy the weights if necessary
|
||||
if (has_weights_) {
|
||||
const auto& weights_vec = info.weights_.HostVector();
|
||||
dh::safe_cuda
|
||||
(cudaMemcpy(weights_.data().get(),
|
||||
info.weights_.data() + row_begin_ + batch_row_begin,
|
||||
weights_vec.data() + row_begin_ + batch_row_begin,
|
||||
batch_nrows * sizeof(bst_float), cudaMemcpyDefault));
|
||||
}
|
||||
|
||||
@@ -310,15 +314,15 @@ struct GPUSketcher {
|
||||
row_ptrs_.data().get() + batch_row_begin,
|
||||
has_weights_ ? weights_.data().get() : nullptr, entries_.data().get(),
|
||||
gpu_batch_nrows_, num_cols_,
|
||||
row_batch.offset[row_begin_ + batch_row_begin], batch_nrows);
|
||||
dh::safe_cuda(cudaGetLastError());
|
||||
dh::safe_cuda(cudaDeviceSynchronize());
|
||||
offset_vec[row_begin_ + batch_row_begin], batch_nrows);
|
||||
dh::safe_cuda(cudaGetLastError()); // NOLINT
|
||||
dh::safe_cuda(cudaDeviceSynchronize()); // NOLINT
|
||||
|
||||
for (int icol = 0; icol < num_cols_; ++icol) {
|
||||
FindColumnCuts(batch_nrows, icol);
|
||||
}
|
||||
|
||||
dh::safe_cuda(cudaDeviceSynchronize());
|
||||
dh::safe_cuda(cudaDeviceSynchronize()); // NOLINT
|
||||
|
||||
// add cuts into sketches
|
||||
thrust::copy(cuts_d_.begin(), cuts_d_.end(), cuts_h_.begin());
|
||||
@@ -331,13 +335,11 @@ struct GPUSketcher {
|
||||
void Sketch(const SparsePage& row_batch, const MetaInfo& info) {
|
||||
// copy rows to the device
|
||||
dh::safe_cuda(cudaSetDevice(device_));
|
||||
const auto& offset_vec = row_batch.offset.HostVector();
|
||||
row_ptrs_.resize(n_rows_ + 1);
|
||||
thrust::copy(row_batch.offset.data() + row_begin_,
|
||||
row_batch.offset.data() + row_end_ + 1,
|
||||
row_ptrs_.begin());
|
||||
|
||||
thrust::copy(offset_vec.data() + row_begin_,
|
||||
offset_vec.data() + row_end_ + 1, row_ptrs_.begin());
|
||||
size_t gpu_nbatches = dh::DivRoundUp(n_rows_, gpu_batch_nrows_);
|
||||
|
||||
for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
|
||||
SketchBatch(row_batch, info, gpu_batch);
|
||||
}
|
||||
@@ -345,15 +347,13 @@ struct GPUSketcher {
|
||||
};
|
||||
|
||||
void Sketch(const SparsePage& batch, const MetaInfo& info, HistCutMatrix* hmat) {
|
||||
// partition input matrix into row segments
|
||||
std::vector<size_t> row_segments;
|
||||
dh::RowSegments(info.num_row_, devices_.Size(), &row_segments);
|
||||
|
||||
// create device shards
|
||||
shards_.resize(devices_.Size());
|
||||
shards_.resize(dist_.Devices().Size());
|
||||
dh::ExecuteIndexShards(&shards_, [&](int i, std::unique_ptr<DeviceShard>& shard) {
|
||||
size_t start = dist_.ShardStart(info.num_row_, i);
|
||||
size_t size = dist_.ShardSize(info.num_row_, i);
|
||||
shard = std::unique_ptr<DeviceShard>
|
||||
(new DeviceShard(devices_[i], row_segments[i], row_segments[i + 1], param_));
|
||||
(new DeviceShard(dist_.Devices()[i], start, start + size, param_));
|
||||
});
|
||||
|
||||
// compute sketches for each shard
|
||||
@@ -379,12 +379,13 @@ struct GPUSketcher {
|
||||
}
|
||||
|
||||
GPUSketcher(tree::TrainParam param, size_t n_rows) : param_(std::move(param)) {
|
||||
devices_ = GPUSet::Range(param_.gpu_id, dh::NDevices(param_.n_gpus, n_rows));
|
||||
dist_ = GPUDistribution::Block(GPUSet::All(param_.n_gpus, n_rows).
|
||||
Normalised(param_.gpu_id));
|
||||
}
|
||||
|
||||
std::vector<std::unique_ptr<DeviceShard>> shards_;
|
||||
tree::TrainParam param_;
|
||||
GPUSet devices_;
|
||||
GPUDistribution dist_;
|
||||
};
|
||||
|
||||
void DeviceSketch
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user