Compare commits


14 Commits

Philip Hyunsu Cho
6852d0afd5 Separate out restricted and unrestricted tasks (#3736) 2018-09-27 23:20:06 -07:00
Philip Hyunsu Cho
c0bd296354 Fix #3730: scikit-learn 0.20 compatibility fix (#3731)
* Fix #3730: scikit-learn 0.20 compatibility fix

sklearn.cross_validation has been removed from scikit-learn 0.20,
so replace it with sklearn.model_selection

* Display test names for Python tests for clarity
2018-09-27 15:04:25 -07:00
Philip Hyunsu Cho
09142c94f5 Disable flaky tests in R-package/tests/testthat/test_update.R (#3723) 2018-09-26 14:57:46 -07:00
Philip Cho
ba4244ef51 Mask tests for 32-bit Windows that fail due to difference between x87 and SSE 2018-09-05 16:18:20 -07:00
Philip Hyunsu Cho
a46b0ac2d2 Fix CRAN check by removing reference to std::cerr (#3660)
* Fix CRAN check by removing reference to std::cerr

* Mask tests that fail on 32-bit Windows R
2018-09-05 12:05:31 -07:00
gorogm
4bc7e94603 Link fixed. (#3640) 2018-09-05 12:04:46 -07:00
Philip Hyunsu Cho
a899e8f4cd Document CUDA requirement, lack of external memory on GPU (#3624)
* Document fact that GPU doesn't support external memory

* Document CUDA requirement
2018-09-05 12:03:46 -07:00
Philip Cho
f9a833f525 Update Python API doc (#3619)
* Show inherited members of XGBRegressor in API doc, since XGBRegressor uses default methods from XGBModel

* Add table of contents to Python API doc

* Skip JVM doc download if not available

* Show inherited members for XGBRegressor

* Add docstring to XGBRegressor.predict()

* Fix rendering errors in Python docstrings

* Fix lint
2018-09-05 12:02:11 -07:00
Grant W Schneider
1afd2f1b2d Remove errant $ (#3618) 2018-09-05 11:55:14 -07:00
Philip Hyunsu Cho
b1d76d533d Fix #3609: Removed unused parameter 'use_buffer' (#3610) 2018-09-05 11:54:54 -07:00
Philip Hyunsu Cho
9d70655c42 Fix #3598: document that custom objective can't contain colon (:) (#3601) 2018-09-05 11:54:19 -07:00
Grace Lam
dd1fda449c Add JSON dump functionality documentation (#3600) 2018-09-05 11:53:31 -07:00
Jakob Richter
324f3b5259 replace nround with nrounds to match actual parameter (#3592) 2018-09-05 11:52:34 -07:00
Philip Cho
24e08c2638 Add version to doc sidebar 2018-08-13 01:46:05 -07:00
304 changed files with 8377 additions and 18881 deletions

.clang-tidy

@@ -1,4 +1,4 @@
Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
Checks: 'modernize-*,-modernize-make-*,-modernize-raw-string-literal,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
CheckOptions:
- { key: readability-identifier-naming.ClassCase, value: CamelCase }
- { key: readability-identifier-naming.StructCase, value: CamelCase }

.github/lock.yml (32 lines changed)

@@ -1,32 +0,0 @@
# Configuration for lock-threads - https://github.com/dessant/lock-threads
# Number of days of inactivity before a closed issue or pull request is locked
daysUntilLock: 90
# Issues and pull requests with these labels will not be locked. Set to `[]` to disable
exemptLabels:
- feature-request
# Label to add before locking, such as `outdated`. Set to `false` to disable
lockLabel: false
# Comment to post before locking. Set to `false` to disable
lockComment: false
# Assign `resolved` as the reason for locking. Set to `false` to disable
setLockReason: true
# Limit to only `issues` or `pulls`
# only: issues
# Optionally, specify configuration settings just for `issues` or `pulls`
# issues:
# exemptLabels:
# - help-wanted
# lockLabel: outdated
# pulls:
# daysUntilLock: 30
# Repository to extend settings from
# _extends: repo

.gitignore (1 line changed)

@@ -91,4 +91,3 @@ lib/
metastore_db
plugin/updater_gpu/test/cpp/data
/include/xgboost/build_config.h

.travis.yml

@@ -6,7 +6,9 @@ os:
- linux
- osx
osx_image: xcode9.3
osx_image: xcode8
group: deprecated-2017Q4
# Use Build Matrix to do lint and build seperately
env:
@@ -26,8 +28,6 @@ env:
- TASK=cpp_test
# distributed test
- TASK=distributed_test
# address sanitizer test
- TASK=sanitizer_test
matrix:
exclude:
@@ -43,8 +43,6 @@ matrix:
env: TASK=cpp_test
- os: osx
env: TASK=distributed_test
- os: osx
env: TASK=sanitizer_test
# dependent apt packages
addons:
@@ -64,13 +62,6 @@ addons:
- graphviz
- gcc-4.8
- g++-4.8
- gcc-7
- g++-7
homebrew:
packages:
- gcc@7
- graphviz
update: true
before_install:
- source dmlc-core/scripts/travis/travis_setup_env.sh

CMakeLists.txt

@@ -8,31 +8,23 @@ set_default_configuration_release()
msvc_use_static_runtime()
# Options
## GPUs
option(USE_CUDA "Build with GPU acceleration" OFF)
option(USE_NCCL "Build with multiple GPUs support" OFF)
option(USE_CUDA "Build with GPU acceleration")
option(USE_AVX "Build with AVX instructions. May not produce identical results due to approximate math." OFF)
option(USE_NCCL "Build using NCCL for multi-GPU. Also requires USE_CUDA")
option(JVM_BINDINGS "Build JVM bindings" OFF)
option(GOOGLE_TEST "Build google tests" OFF)
option(R_LIB "Build shared library for R package" OFF)
option(USE_SANITIZER "Use santizer flags" OFF)
set(GPU_COMPUTE_VER "" CACHE STRING
"Space separated list of compute versions to be built against, e.g. '35 61'")
## Bindings
option(JVM_BINDINGS "Build JVM bindings" OFF)
option(R_LIB "Build shared library for R package" OFF)
## Devs
option(USE_SANITIZER "Use santizer flags" OFF)
option(SANITIZER_PATH "Path to sanitizes.")
set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
"Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
address, leak and thread.")
option(GOOGLE_TEST "Build google tests" OFF)
# Plugins
option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)
# Deprecation warning
if(USE_AVX)
message(WARNING "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from xgboost.")
if(PLUGIN_UPDATER_GPU)
set(USE_CUDA ON)
message(WARNING "The option 'PLUGIN_UPDATER_GPU' is deprecated. Set 'USE_CUDA' instead.")
endif()
# Compiler flags
@@ -55,32 +47,22 @@ if(WIN32 AND MINGW)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-libstdc++")
endif()
# Check existence of software pre-fetching
include(CheckCXXSourceCompiles)
check_cxx_source_compiles("
#include <xmmintrin.h>
int main() {
char data = 0;
const char* address = &data;
_mm_prefetch(address, _MM_HINT_NTA);
return 0;
}
" XGBOOST_MM_PREFETCH_PRESENT)
check_cxx_source_compiles("
int main() {
char data = 0;
const char* address = &data;
__builtin_prefetch(address, 0, 0);
return 0;
}
" XGBOOST_BUILTIN_PREFETCH_PRESENT)
# Sanitizer
if(USE_SANITIZER)
include(cmake/Sanitizer.cmake)
enable_sanitizers("${ENABLED_SANITIZERS}")
endif(USE_SANITIZER)
# AVX
if(USE_AVX)
if(MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
endif()
add_definitions(-DXGBOOST_USE_AVX)
endif()
# dmlc-core
add_subdirectory(dmlc-core)
set(LINK_LIBRARIES dmlc rabit)
@@ -101,19 +83,12 @@ if(R_LIB)
)
endif()
# Gather source files
include_directories (
${PROJECT_SOURCE_DIR}/include
${PROJECT_SOURCE_DIR}/dmlc-core/include
${PROJECT_SOURCE_DIR}/rabit/include
)
# Generate configurable header
set(CMAKE_LOCAL "${PROJECT_SOURCE_DIR}/cmake")
set(INCLUDE_ROOT "${PROJECT_SOURCE_DIR}/include")
message(STATUS "${CMAKE_LOCAL}/build_config.h.in -> ${INCLUDE_ROOT}/xgboost/build_config.h")
configure_file("${CMAKE_LOCAL}/build_config.h.in" "${INCLUDE_ROOT}/xgboost/build_config.h")
file(GLOB_RECURSE SOURCES
src/*.cc
src/*.h
@@ -128,17 +103,8 @@ file(GLOB_RECURSE CUDA_SOURCES
src/*.cuh
)
# Add plugins to source files
if(PLUGIN_LZ4)
list(APPEND SOURCES plugin/lz4/sparse_page_lz4_format.cc)
link_libraries(lz4)
endif()
if(PLUGIN_DENSE_PARSER)
list(APPEND SOURCES plugin/dense_parser/dense_libsvm.cc)
endif()
# rabit
# TODO: Use CMakeLists.txt from rabit.
# TODO: Create rabit cmakelists.txt
set(RABIT_SOURCES
rabit/src/allreduce_base.cc
rabit/src/allreduce_robust.cc
@@ -149,7 +115,6 @@ set(RABIT_EMPTY_SOURCES
rabit/src/engine_empty.cc
rabit/src/c_api.cc
)
if(MINGW OR R_LIB)
# build a dummy rabit library
add_library(rabit STATIC ${RABIT_EMPTY_SOURCES})
@@ -157,11 +122,7 @@ else()
add_library(rabit STATIC ${RABIT_SOURCES})
endif()
if (GENERATE_COMPILATION_DATABASE)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
endif (GENERATE_COMPILATION_DATABASE)
if(USE_CUDA AND (NOT GENERATE_COMPILATION_DATABASE))
if(USE_CUDA)
find_package(CUDA 8.0 REQUIRED)
cmake_minimum_required(VERSION 3.5)
@@ -171,7 +132,7 @@ if(USE_CUDA AND (NOT GENERATE_COMPILATION_DATABASE))
if(USE_NCCL)
find_package(Nccl REQUIRED)
cuda_include_directories(${NCCL_INCLUDE_DIR})
include_directories(${NCCL_INCLUDE_DIR})
add_definitions(-DXGBOOST_USE_NCCL)
endif()
@@ -191,39 +152,6 @@ if(USE_CUDA AND (NOT GENERATE_COMPILATION_DATABASE))
target_link_libraries(gpuxgboost ${NCCL_LIB_NAME})
endif()
list(APPEND LINK_LIBRARIES gpuxgboost)
elseif (USE_CUDA AND GENERATE_COMPILATION_DATABASE)
# Enable CUDA language to generate a compilation database.
cmake_minimum_required(VERSION 3.8)
find_package(CUDA 8.0 REQUIRED)
enable_language(CUDA)
set(CMAKE_CUDA_COMPILER clang++)
set(CUDA_SEPARABLE_COMPILATION ON)
if (NOT CLANG_CUDA_GENCODE)
set(CLANG_CUDA_GENCODE "--cuda-gpu-arch=sm_35")
endif (NOT CLANG_CUDA_GENCODE)
set(CMAKE_CUDA_FLAGS " -Wno-deprecated ${CLANG_CUDA_GENCODE} -fPIC ${GENCODE} -std=c++11 -x cuda")
message(STATUS "CMAKE_CUDA_FLAGS: ${CMAKE_CUDA_FLAGS}")
add_library(gpuxgboost STATIC ${CUDA_SOURCES})
if(USE_NCCL)
find_package(Nccl REQUIRED)
target_include_directories(gpuxgboost PUBLIC ${NCCL_INCLUDE_DIR})
target_compile_definitions(gpuxgboost PUBLIC -DXGBOOST_USE_NCCL)
target_link_libraries(gpuxgboost PUBLIC ${NCCL_LIB_NAME})
endif()
target_compile_definitions(gpuxgboost PUBLIC -DXGBOOST_USE_CUDA)
# A hack for CMake to make arguments valid for clang++
string(REPLACE "-x cu" "-x cuda" CMAKE_CUDA_COMPILE_PTX_COMPILATION
${CMAKE_CUDA_COMPILE_PTX_COMPILATION})
string(REPLACE "-x cu" "-x cuda" CMAKE_CUDA_COMPILE_WHOLE_COMPILATION
${CMAKE_CUDA_COMPILE_WHOLE_COMPILATION})
string(REPLACE "-x cu" "-x cuda" CMAKE_CUDA_COMPILE_SEPARABLE_COMPILATION
${CMAKE_CUDA_COMPILE_SEPARABLE_COMPILATION})
target_include_directories(gpuxgboost PUBLIC cub)
endif()
@@ -239,6 +167,7 @@ endif()
add_library(objxgboost OBJECT ${SOURCES})
# building shared library for R package
if(R_LIB)
find_package(LibR REQUIRED)
@@ -246,25 +175,22 @@ if(R_LIB)
list(APPEND LINK_LIBRARIES "${LIBR_CORE_LIBRARY}")
MESSAGE(STATUS "LIBR_CORE_LIBRARY " ${LIBR_CORE_LIBRARY})
# Shared library target for the R package
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
include_directories(xgboost
include_directories(
"${LIBR_INCLUDE_DIRS}"
"${PROJECT_SOURCE_DIR}"
)
# Shared library target for the R package
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
target_link_libraries(xgboost ${LINK_LIBRARIES})
# R uses no lib prefix in shared library names of its packages
set_target_properties(xgboost PROPERTIES PREFIX "")
if(APPLE)
set_target_properties(xgboost PROPERTIES SUFFIX ".so")
endif()
setup_rpackage_install_target(xgboost ${CMAKE_CURRENT_BINARY_DIR})
# use a dummy location for any other remaining installs
set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/dummy_inst")
# main targets: shared library & exe
# main targets: shared library & exe
else()
# Executable
add_executable(runxgboost $<TARGET_OBJECTS:objxgboost> src/cli_main.cc)
@@ -287,20 +213,20 @@ else()
add_dependencies(xgboost runxgboost)
endif()
# JVM
if(JVM_BINDINGS)
find_package(JNI QUIET REQUIRED)
include_directories(${JNI_INCLUDE_DIRS} jvm-packages/xgboost4j/src/native)
add_library(xgboost4j SHARED
$<TARGET_OBJECTS:objxgboost>
jvm-packages/xgboost4j/src/native/xgboost4j.cpp)
target_include_directories(xgboost4j
PRIVATE ${JNI_INCLUDE_DIRS}
PRIVATE jvm-packages/xgboost4j/src/native)
target_link_libraries(xgboost4j
${LINK_LIBRARIES}
${JAVA_JVM_LIBRARY})
$<TARGET_OBJECTS:objxgboost>
jvm-packages/xgboost4j/src/native/xgboost4j.cpp)
set_output_directory(xgboost4j ${PROJECT_SOURCE_DIR}/lib)
target_link_libraries(xgboost4j
${LINK_LIBRARIES}
${JAVA_JVM_LIBRARY})
endif()
@@ -311,29 +237,17 @@ if(GOOGLE_TEST)
file(GLOB_RECURSE TEST_SOURCES "tests/cpp/*.cc")
auto_source_group("${TEST_SOURCES}")
include_directories(${GTEST_INCLUDE_DIRS})
if(USE_CUDA AND (NOT GENERATE_COMPILATION_DATABASE))
if(USE_CUDA)
file(GLOB_RECURSE CUDA_TEST_SOURCES "tests/cpp/*.cu")
cuda_include_directories(${GTEST_INCLUDE_DIRS})
cuda_compile(CUDA_TEST_OBJS ${CUDA_TEST_SOURCES})
elseif (USE_CUDA AND GENERATE_COMPILATION_DATABASE)
file(GLOB_RECURSE CUDA_TEST_SOURCES "tests/cpp/*.cu")
else()
set(CUDA_TEST_OBJS "")
endif()
if (USE_CUDA AND GENERATE_COMPILATION_DATABASE)
add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_SOURCES}
$<TARGET_OBJECTS:objxgboost>)
target_include_directories(testxgboost PRIVATE cub)
else ()
add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_OBJS}
$<TARGET_OBJECTS:objxgboost>)
endif ()
add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_OBJS} $<TARGET_OBJECTS:objxgboost>)
set_output_directory(testxgboost ${PROJECT_SOURCE_DIR})
target_include_directories(testxgboost
PRIVATE ${GTEST_INCLUDE_DIRS})
target_link_libraries(testxgboost ${GTEST_LIBRARIES} ${LINK_LIBRARIES})
add_test(TestXGBoost testxgboost)

CONTRIBUTORS.md

@@ -6,30 +6,21 @@ Committers
----------
Committers are people who have made substantial contribution to the project and granted write access to the project.
* [Tianqi Chen](https://github.com/tqchen), University of Washington
- Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
- Tianqi is a PhD working on large-scale machine learning, he is the creator of the project.
* [Tong He](https://github.com/hetong007), Amazon AI
- Tong is an applied scientist in Amazon AI. He is the maintainer of XGBoost R package.
- Tong is an applied scientist in Amazon AI, he is the maintainer of xgboost R package.
* [Vadim Khotilovich](https://github.com/khotilov)
- Vadim contributes many improvements in R and core packages.
* [Bing Xu](https://github.com/antinucleon)
- Bing is the original creator of XGBoost Python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
- Bing is the original creator of xgboost python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
* [Michael Benesty](https://github.com/pommedeterresautee)
- Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
- Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
* [Nan Zhu](https://github.com/CodingCat), Uber
- Nan is a software engineer in Uber. He contributed mostly in JVM packages.
* [Sergei Lebedev](https://github.com/superbobry), Criteo
- Sergei is a software engineer in Criteo. He contributed mostly in JVM packages.
* [Hongliang Liu](https://github.com/phunterlau)
* [Scott Lundberg](http://scottlundberg.com/), University of Washington
- Scott is a Ph.D. student at University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package.
* [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
- Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
* [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
- Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
* [Jiaming](https://github.com/trivialfis)
- Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
- Micheal is a lawyer, data scientist in France, he is the creator of xgboost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan)
- Yuan is a data scientist in Chicago, US. He contributed mostly in R and Python packages.
* [Nan Zhu](https://github.com/CodingCat)
- Nan is a software engineer in Microsoft. He contributed mostly in JVM packages.
* [Sergei Lebedev](https://github.com/superbobry)
- Serget is a software engineer in Criteo. He contributed mostly in JVM packages.
Become a Committer
------------------
@@ -45,25 +36,28 @@ List of Contributors
* [Full List of Contributors](https://github.com/dmlc/xgboost/graphs/contributors)
- To contributors: please add your name to the list when you submit a patch to the project:)
* [Kailong Chen](https://github.com/kalenhaha)
- Kailong is an early contributor of XGBoost, he is creator of ranking objectives in XGBoost.
- Kailong is an early contributor of xgboost, he is creator of ranking objectives in xgboost.
* [Skipper Seabold](https://github.com/jseabold)
- Skipper is the major contributor to the scikit-learn module of XGBoost.
- Skipper is the major contributor to the scikit-learn module of xgboost.
* [Zygmunt Zając](https://github.com/zygmuntz)
- Zygmunt is the master behind the early stopping feature frequently used by kagglers.
* [Ajinkya Kale](https://github.com/ajkl)
* [Boliang Chen](https://github.com/cblsjtu)
* [Yangqing Men](https://github.com/yanqingmen)
- Yangqing is the creator of XGBoost java package.
- Yangqing is the creator of xgboost java package.
* [Engpeng Yao](https://github.com/yepyao)
* [Giulio](https://github.com/giuliohome)
- Giulio is the creator of Windows project of XGBoost
- Giulio is the creator of windows project of xgboost
* [Jamie Hall](https://github.com/nerdcha)
- Jamie is the initial creator of XGBoost scikit-learn module.
- Jamie is the initial creator of xgboost sklearn module.
* [Yen-Ying Lee](https://github.com/white1033)
* [Masaaki Horikoshi](https://github.com/sinhrks)
- Masaaki is the initial creator of XGBoost Python plotting module.
- Masaaki is the initial creator of xgboost python plotting module.
* [Hongliang Liu](https://github.com/phunterlau)
* [Hyunsu Cho](http://hyunsu-cho.io/)
- Hyunsu is the maintainer of the XGBoost Python package. He is in charge of submitting the Python package to Python Package Index (PyPI). He is also the initial author of the CPU 'hist' updater.
* [daiyl0320](https://github.com/daiyl0320)
- daiyl0320 contributed patch to XGBoost distributed version more robust, and scales stably on TB scale datasets.
- daiyl0320 contributed patch to xgboost distributed version more robust, and scales stably on TB scale datasets.
* [Huayi Zhang](https://github.com/irachex)
* [Johan Manders](https://github.com/johanmanders)
* [yoori](https://github.com/yoori)
@@ -74,6 +68,8 @@ List of Contributors
* [Alex Bain](https://github.com/convexquad)
* [Baltazar Bieniek](https://github.com/bbieniek)
* [Adam Pocock](https://github.com/Craigacp)
* [Rory Mitchell](https://github.com/RAMitchell)
- Rory is the author of the GPU plugin and also contributed the cmake build system and windows continuous integration
* [Gideon Whitehead](https://github.com/gaw89)
* [Yi-Lin Juang](https://github.com/frankyjuang)
* [Andrew Hannigan](https://github.com/andrewhannigan)
@@ -82,9 +78,3 @@ List of Contributors
* [Pierre de Sahb](https://github.com/pdesahb)
* [liuliang01](https://github.com/liuliang01)
- liuliang01 added support for the qid column for LibSVM input format. This makes ranking task easier in distributed setting.
* [Andrew Thia](https://github.com/BlueTea88)
- Andrew Thia implemented feature interaction constraints
* [Wei Tian](https://github.com/weitian)
* [Chen Qin](https://github.com/chenqin)
* [Sam Wilkinson](https://samwilkinson.io)
* [Matthew Jones](https://github.com/mt-jones)

Jenkinsfile (63 lines changed)

@@ -14,7 +14,6 @@ def dockerRun = 'tests/ci_build/ci_build.sh'
def utils
def buildMatrix = [
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2", "multiGpu": true],
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2" ],
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": false, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
@@ -53,7 +52,7 @@ pipeline {
parallel (buildMatrix.findAll{it['enabled']}.collectEntries{ c ->
def buildName = utils.getBuildName(c)
utils.buildFactory(buildName, c, false, this.&buildPlatformCmake)
} + [ "clang-tidy" : { buildClangTidyJob() } ])
})
}
}
}
@@ -68,60 +67,22 @@ def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
// Destination dir for artifacts
def distDir = "dist/${buildName}"
def dockerArgs = ""
if (conf["withGpu"]) {
if(conf["withGpu"]){
dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
}
def test_suite = conf["withGpu"] ? (conf["multiGpu"] ? "mgpu" : "gpu") : "cpu"
// Build node - this is returned result
retry(1) {
node(nodeReq) {
unstash name: 'srcs'
echo """
|===== XGBoost CMake build =====
| dockerTarget: ${dockerTarget}
| cmakeOpts : ${opts}
|=========================
""".stripMargin('|')
// Invoke command inside docker
sh """
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/test_${test_suite}.sh
"""
if (!conf["multiGpu"]) {
sh """
${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
cp xgboost "${distDir}"
cp -r python-package/dist "${distDir}/py"
# Test the wheel for compatibility on a barebones CPU container
${dockerRun} release ${dockerArgs} bash -c " \
pip install --user python-package/dist/xgboost-*-none-any.whl && \
pytest -v --fulltrace -s tests/python"
# Test the wheel for compatibility on CUDA 10.0 container
${dockerRun} gpu --build-arg CUDA_VERSION=10.0 bash -c " \
pip install --user python-package/dist/xgboost-*-none-any.whl && \
pytest -v -s --fulltrace -m '(not mgpu) and (not slow)' tests/python-gpu"
"""
}
}
}
}
/**
* Run a clang-tidy job on a GPU machine
*/
def buildClangTidyJob() {
def nodeReq = "linux && gpu && unrestricted"
node(nodeReq) {
unstash name: 'srcs'
echo "Running clang-tidy job..."
echo """
|===== XGBoost CMake build =====
| dockerTarget: ${dockerTarget}
| cmakeOpts : ${opts}
|=========================
""".stripMargin('|')
// Invoke command inside docker
// Install Google Test and Python yaml
dockerTarget = "clang_tidy"
dockerArgs = "--build-arg CUDA_VERSION=9.2"
sh """
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/clang_tidy.sh
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/test_${dockerTarget}.sh
"""
}
}
}
}


@@ -13,10 +13,6 @@ def dockerRun = 'tests/ci_build/ci_build.sh'
// Utility functions
@Field
def utils
@Field
def commit_id
@Field
def branch_name
def buildMatrix = [
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2" ],
@@ -46,28 +42,27 @@ pipeline {
script {
utils = load('tests/ci_build/jenkins_tools.Groovy')
utils.checkoutSrcs()
commit_id = "${GIT_COMMIT}"
branch_name = "${GIT_LOCAL_BRANCH}"
}
stash name: 'srcs', excludes: '.git/'
milestone label: 'Sources ready', ordinal: 1
}
}
stage('Jenkins: Build doc') {
agent {
label 'linux && cpu && restricted'
}
steps {
unstash name: 'srcs'
script {
retry(1) {
node('linux && cpu && restricted') {
unstash name: 'srcs'
echo 'Building doc...'
dir ('jvm-packages') {
sh "bash ./build_doc.sh ${commit_id}"
archiveArtifacts artifacts: "${commit_id}.tar.bz2", allowEmptyArchive: true
echo 'Deploying doc...'
withAWS(credentials:'xgboost-doc-bucket') {
s3Upload file: "${commit_id}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${branch_name}.tar.bz2"
}
}
def commit_id = "${GIT_COMMIT}"
def branch_name = "${GIT_LOCAL_BRANCH}"
echo 'Building doc...'
dir ('jvm-packages') {
sh "bash ./build_doc.sh ${commit_id}"
archiveArtifacts artifacts: "${commit_id}.tar.bz2", allowEmptyArchive: true
echo 'Deploying doc...'
withAWS(credentials:'xgboost-doc-bucket') {
s3Upload file: "${commit_id}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${branch_name}.tar.bz2"
}
}
}
@@ -99,25 +94,28 @@ def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
}
// Build node - this is returned result
retry(1) {
node(nodeReq) {
unstash name: 'srcs'
echo """
|===== XGBoost CMake build =====
| dockerTarget: ${dockerTarget}
| cmakeOpts : ${opts}
|=========================
""".stripMargin('|')
// Invoke command inside docker
sh """
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
cp xgboost "${distDir}"
cp -r lib "${distDir}"
cp -r python-package/dist "${distDir}/py"
"""
archiveArtifacts artifacts: "${distDir}/**/*.*", allowEmptyArchive: true
}
node(nodeReq) {
unstash name: 'srcs'
echo """
|===== XGBoost CMake build =====
| dockerTarget: ${dockerTarget}
| cmakeOpts : ${opts}
|=========================
""".stripMargin('|')
// Invoke command inside docker
sh """
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
cp xgboost "${distDir}"
cp -r lib "${distDir}"
cp -r python-package/dist "${distDir}/py"
# Test the wheel for compatibility on a barebones CPU container
${dockerRun} release ${dockerArgs} bash -c " \
auditwheel show xgboost-*-py2-none-any.whl
pip install --user python-package/dist/xgboost-*-none-any.whl && \
python -m nose tests/python"
"""
archiveArtifacts artifacts: "${distDir}/**/*.*", allowEmptyArchive: true
}
}

LICENSE (208 lines changed)

@@ -1,201 +1,13 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Copyright (c) 2016 by Contributors
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
1. Definitions.
http://www.apache.org/licenses/LICENSE-2.0
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright (c) 2018 by Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Makefile

@@ -260,8 +260,7 @@ Rpack: clean_all
cp ./LICENSE xgboost
cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' | sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.in
cp xgboost/src/Makevars.in xgboost/src/Makevars.win
sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CXXFLAGS\)/g' xgboost/src/Makevars.win
sed -i -e 's/-pthread/$$\(SHLIB_PTHREAD_FLAGS\)/g' xgboost/src/Makevars.win
sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CFLAGS\)/g' xgboost/src/Makevars.win
bash R-package/remove_warning_suppression_pragma.sh
rm xgboost/remove_warning_suppression_pragma.sh

NEWS.md (331 lines changed)

@@ -3,331 +3,6 @@ XGBoost Change Log
This file records the changes in xgboost library in reverse chronological order.
## v0.82 (2019.03.03)
This release is packed with many new features and bug fixes.
### Roadmap: better performance scaling for multi-core CPUs (#3957)
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #3957 marks an important step toward better performance scaling, by using software pre-fetching and replacing STL vectors with C-style arrays. Special thanks to @Laurae2 and @SmirnovEgorRu.
* See #3810 for latest progress on this roadmap.
### New feature: Distributed Fast Histogram Algorithm (`hist`) (#4011, #4102, #4140, #4128)
* It is now possible to run the `hist` algorithm in distributed setting. Special thanks to @CodingCat. The benefits include:
1. Faster local computation via feature binning
2. Support for monotonic constraints and feature interaction constraints
3. Simpler codebase than `approx`, allowing for future improvement
* Depth-wise tree growing is now performed in a separate code path, so that cross-node synchronization is performed only once per level.
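For reference, a minimal Python sketch of enabling the `hist` algorithm on a single process (the data is synthetic and the distributed rabit setup is out of scope here):

```python
import numpy as np
import xgboost as xgb

# Synthetic stand-in for a real dataset.
X = np.random.rand(1000, 10)
y = np.random.randint(2, size=1000)
dtrain = xgb.DMatrix(X, label=y)

params = {
    'tree_method': 'hist',        # fast histogram algorithm (feature binning)
    'grow_policy': 'depthwise',   # depth-wise growing, as described above
    'objective': 'binary:logistic',
}
bst = xgb.train(params, dtrain, num_boost_round=10)
```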
### New feature: Multi-Node, Multi-GPU training (#4095)
* Distributed training is now able to utilize clusters equipped with NVIDIA GPUs. In particular, the rabit AllReduce layer will communicate GPU device information. Special thanks to @mt-jones, @RAMitchell, @rongou, @trivialfis, @canonizer, and @jeffdk.
* Resource management systems will be able to assign a rank for each GPU in the cluster.
* In Dask, users will be able to construct a collection of XGBoost processes over an inhomogeneous device cluster (i.e. workers with different number and/or kinds of GPUs).
### New feature: Multiple validation datasets in XGBoost4J-Spark (#3904, #3910)
* You can now track the performance of the model during training with multiple evaluation datasets. By specifying `eval_sets` or calling `setEvalSets` on an `XGBoostClassifier` or `XGBoostRegressor`, you can pass in multiple evaluation datasets typed as a `Map` from `String` to `DataFrame`. Special thanks to @CodingCat.
* See the usage of multiple validation datasets [here](https://github.com/dmlc/xgboost/blob/0c1d5f1120c0a159f2567b267f0ec4ffadee00d0/jvm-packages/xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/SparkTraining.scala#L66-L78)
### New feature: Additional metric functions for GPUs (#3952)
* Element-wise metrics have been ported to GPU: `rmse`, `mae`, `logloss`, `poisson-nloglik`, `gamma-deviance`, `gamma-nloglik`, `error`, `tweedie-nloglik`. Special thanks to @trivialfis and @RAMitchell.
* With supported metrics, XGBoost will select the correct devices based on your system and `n_gpus` parameter.
### New feature: Column sampling at individual nodes (splits) (#3971)
* Columns (features) can now be sampled at individual tree nodes, in addition to per-tree and per-level sampling. To enable per-node sampling, set `colsample_bynode` parameter, which represents the fraction of columns sampled at each node. This parameter is set to 1.0 by default (i.e. no sampling per node). Special thanks to @canonizer.
* The `colsample_bynode` parameter works cumulatively with other `colsample_by*` parameters: for example, `{'colsample_bynode':0.5, 'colsample_bytree':0.5}` with 100 columns will give 25 features to choose from at each split.
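A short sketch of the cumulative behaviour described above, with hypothetical sampling fractions: 100 columns × 0.5 (per tree) × 0.5 (per node) leaves 25 candidate features at each split.

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(500, 100)   # 100 columns, as in the example above
y = np.random.rand(500)
dtrain = xgb.DMatrix(X, label=y)

params = {
    'colsample_bytree': 0.5,   # 50 columns sampled per tree
    'colsample_bynode': 0.5,   # 25 of those remain available at each split
    'objective': 'reg:linear',
}
bst = xgb.train(params, dtrain, num_boost_round=5)
```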
### Major API change: consistent logging level via `verbosity` (#3982, #4002, #4138)
* XGBoost now allows fine-grained control over logging. You can set `verbosity` to 0 (silent), 1 (warning), 2 (info), and 3 (debug). This is useful for controlling the amount of logging outputs. Special thanks to @trivialfis.
* Parameters `silent` and `debug_verbose` are now deprecated.
* Note: Sometimes XGBoost tries to change configurations based on heuristics, which is displayed as a warning message. If you see unexpected behaviour, try increasing the value of `verbosity`.
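A quick sketch of the new logging control, using synthetic data (the verbosity levels are as listed above):

```python
import numpy as np
import xgboost as xgb

dtrain = xgb.DMatrix(np.random.rand(100, 4),
                     label=np.random.randint(2, size=100))

# 0 = silent, 1 = warning, 2 = info, 3 = debug
params = {'verbosity': 2, 'objective': 'binary:logistic'}
bst = xgb.train(params, dtrain, num_boost_round=5)
```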
### Major bug fix: external memory (#4040, #4193)
* Clarify object ownership in multi-threaded prefetcher, to avoid memory error.
* Correctly merge two column batches (which uses [CSC layout](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_(CSC_or_CCS))).
* Add unit tests for external memory.
* Special thanks to @trivialfis and @hcho3.
### Major bug fix: early stopping fixed in XGBoost4J and XGBoost4J-Spark (#3928, #4176)
* Early stopping in XGBoost4J and XGBoost4J-Spark is now consistent with its counterpart in the Python package. Training stops if the current iteration is `earlyStoppingSteps` away from the best iteration. If there are multiple evaluation sets, only the last one is used to determine early stopping.
* See the updated documentation [here](https://xgboost.readthedocs.io/en/release_0.82/jvm/xgboost4j_spark_tutorial.html#early-stopping)
* Special thanks to @CodingCat, @yanboliang, and @mingyang.
### Major bug fix: infrequent features should not crash distributed training (#4045)
* For infrequently occurring features, some partitions may not get any instance. This scenario used to crash distributed training due to mal-formed ranges. The problem has now been fixed.
* In practice, one-hot-encoded categorical variables tend to produce rare features, particularly when the cardinality is high.
* Special thanks to @CodingCat.
### Performance improvements
* Faster, more space-efficient radix sorting in `gpu_hist` (#3895)
* Subtraction trick in histogram calculation in `gpu_hist` (#3945)
* More performant re-partition in XGBoost4J-Spark (#4049)
### Bug-fixes
* Fix semantics of `gpu_id` when running multiple XGBoost processes on a multi-GPU machine (#3851)
* Fix page storage path for external memory on Windows (#3869)
* Fix configuration setup so that DART utilizes GPU (#4024)
* Eliminate NAN values from SHAP prediction (#3943)
* Prevent empty quantile sketches in `hist` (#4155)
* Enable running objectives with 0 GPU (#3878)
* Parameters are no longer dependent on system locale (#3891, #3907)
* Use consistent data type in the GPU coordinate descent code (#3917)
* Remove undefined behavior in the CLI config parser on the ARM platform (#3976)
* Initialize counters in GPU AllReduce (#3987)
* Prevent deadlocks in GPU AllReduce (#4113)
* Load correct values from sliced NumPy arrays (#4147, #4165)
* Fix incorrect GPU device selection (#4161)
* Make feature binning logic in `hist` aware of query groups when running a ranking task (#4115). For ranking task, query groups are weighted, not individual instances.
* Generate correct C++ exception type for `LOG(FATAL)` macro (#4159)
* Python package
- Python package should run on system without `PATH` environment variable (#3845)
- Fix `coef_` and `intercept_` signature to be compatible with `sklearn.RFECV` (#3873)
- Use UTF-8 encoding in Python package README, to support non-English locale (#3867)
- Add AUC-PR to list of metrics to maximize for early stopping (#3936)
- Allow loading pickles without `self.booster` attribute, for backward compatibility (#3938, #3944)
- White-list DART for feature importances (#4073)
- Update usage of [h2oai/datatable](https://github.com/h2oai/datatable) (#4123)
* XGBoost4J-Spark
- Address scalability issue in prediction (#4033)
- Enforce the use of per-group weights for ranking task (#4118)
- Fix vector size of `rawPredictionCol` in `XGBoostClassificationModel` (#3932)
- More robust error handling in Spark tracker (#4046, #4108)
- Fix return type of `setEvalSets` (#4105)
- Return correct value of `getMaxLeaves` (#4114)
### API changes
* Add experimental parameter `single_precision_histogram` to use single-precision histograms for the `gpu_hist` algorithm (#3965)
* Python package
- Add option to select the type of feature importances in the scikit-learn interface (#3876); see the sketch after this list
- Add `trees_to_df()` method to dump decision trees as Pandas data frame (#4153)
- Add options to control node shapes in the GraphViz plotting function (#3859)
- Add `xgb_model` option to `XGBClassifier`, to load previously saved model (#4092)
- Passing lists into `DMatrix` is now deprecated (#3970)
* XGBoost4J
- Support multiple feature importance features (#3801)
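A sketch of two of the Python-side changes above, using synthetic data (`'gain'` is one illustrative importance type):

```python
import numpy as np
import xgboost as xgb
from xgboost import XGBClassifier

# Select the feature-importance type in the scikit-learn interface (#3876).
clf = XGBClassifier(n_estimators=10, importance_type='gain')
clf.fit(np.random.rand(200, 5), np.random.randint(2, size=200))
print(clf.feature_importances_)

# Passing plain Python lists into DMatrix is deprecated (#3970);
# pass a NumPy array instead.
dtrain = xgb.DMatrix(np.asarray([[1.0, 2.0], [3.0, 4.0]]),
                     label=np.asarray([0, 1]))
```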
### Maintenance: Refactor C++ code for legibility and maintainability
* Refactor `hist` algorithm code and add unit tests (#3836)
* Minor refactoring of split evaluator in `gpu_hist` (#3889)
* Removed unused leaf vector field in the tree model (#3989)
* Simplify the tree representation by combining `TreeModel` and `RegTree` classes (#3995)
* Simplify and harden tree expansion code (#4008, #4015)
* De-duplicate parameter classes in the linear model algorithms (#4013)
* Robust handling of ranges with C++20 span in `gpu_exact` and `gpu_coord_descent` (#4020, #4029)
* Simplify tree training code (#3825). Also use Span class for robust handling of ranges.
### Maintenance: testing, continuous integration, build system
* Disallow `std::regex` since it's not supported by GCC 4.8.x (#3870)
* Add multi-GPU tests for coordinate descent algorithm for linear models (#3893, #3974)
* Enforce naming style in Python lint (#3896)
* Refactor Python tests (#3897, #3901): Use pytest exclusively, display full trace upon failure
* Address `DeprecationWarning` when using Python collections (#3909)
* Use correct group for maven site plugin (#3937)
* Jenkins CI is now using on-demand EC2 instances exclusively, due to unreliability of Spot instances (#3948)
* Better GPU performance logging (#3945)
* Fix GPU tests on machines with only 1 GPU (#4053)
* Eliminate CRAN check warnings and notes (#3988)
* Add unit tests for tree serialization (#3989)
* Add unit tests for tree fitting functions in `hist` (#4155)
* Add a unit test for `gpu_exact` algorithm (#4020)
* Correct JVM CMake GPU flag (#4071)
* Fix failing Travis CI on Mac (#4086)
* Speed up Jenkins by not compiling CMake (#4099)
* Analyze C++ and CUDA code using clang-tidy, as part of Jenkins CI pipeline (#4034)
* Fix broken R test: Install Homebrew GCC (#4142)
* Check for empty datasets in GPU unit tests (#4151)
* Fix Windows compilation (#4139)
* Comply with latest convention of cpplint (#4157)
* Fix a unit test in `gpu_hist` (#4158)
* Speed up data generation in Python tests (#4164)
### Usability Improvements
* Add link to [InfoWorld 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html) (#4116)
* Remove outdated AWS YARN tutorial (#3885)
* Document current limitation in number of features (#3886)
* Remove unnecessary warning when `gblinear` is selected (#3888)
* Document limitation of CSV parser: header not supported (#3934)
* Log training parameters in XGBoost4J-Spark (#4091)
* Clarify early stopping behavior in the scikit-learn interface (#3967)
* Clarify behavior of `max_depth` parameter (#4078)
* Revise Python docstrings for ranking task (#4121). In particular, weights must be per-group in the learning-to-rank setting.
* Document parameter `num_parallel_tree` (#4022)
* Add Jenkins status badge (#4090)
* Warn users against using internal functions of `Booster` object (#4066)
* Reformat `benchmark_tree.py` to comply with Python style convention (#4126)
* Clarify a comment in `objectiveTrait` (#4174)
* Fix typos and broken links in documentation (#3890, #3872, #3902, #3919, #3975, #4027, #4156, #4167)
### Acknowledgement
**Contributors** (in no particular order): Jiaming Yuan (@trivialfis), Hyunsu Cho (@hcho3), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Yanbo Liang (@yanboliang), Andy Adinets (@canonizer), Tong He (@hetong007), Yuan Tang (@terrytangyuan)
**First-time Contributors** (in no particular order): Jelle Zijlstra (@JelleZijlstra), Jiacheng Xu (@jiachengxu), @ajing, Kashif Rasul (@kashif), @theycallhimavi, Joey Gao (@pjgao), Prabakaran Kumaresshan (@nixphix), Huafeng Wang (@huafengw), @lyxthe, Sam Wilkinson (@scwilkinson), Tatsuhito Kato (@stabacov), Shayak Banerjee (@shayakbanerjee), Kodi Arfer (@Kodiologist), @KyleLi1985, Egor Smirnov (@SmirnovEgorRu), @tmitanitky, Pasha Stetsenko (@st-pasha), Kenichi Nagahara (@keni-chi), Abhai Kollara Dilip (@abhaikollara), Patrick Ford (@pford221), @hshujuan, Matthew Jones (@mt-jones), Thejaswi Rao (@teju85), Adam November (@anovember)
**First-time Reviewers** (in no particular order): Mingyang Hu (@mingyang), Theodore Vasiloudis (@thvasilo), Jakub Troszok (@troszok), Rong Ou (@rongou), @Denisevi4, Matthew Jones (@mt-jones), Jeff Kaplan (@jeffdk)
## v0.81 (2018.11.04)
### New feature: feature interaction constraints
* Users are now able to control which features (independent variables) are allowed to interact by specifying feature interaction constraints (#3466).
* [Tutorial](https://xgboost.readthedocs.io/en/release_0.81/tutorials/feature_interaction_constraint.html) is available, as well as [R](https://github.com/dmlc/xgboost/blob/9254c58e4dfff6a59dc0829a2ceb02e45ed17cd0/R-package/demo/interaction_constraints.R) and [Python](https://github.com/dmlc/xgboost/blob/9254c58e4dfff6a59dc0829a2ceb02e45ed17cd0/tests/python/test_interaction_constraints.py) examples.
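A minimal sketch of the constraint syntax, with a hypothetical grouping of five features (features 0-1 may interact with each other, and 2-4 with each other, but splits never mix the two groups):

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(300, 5)
y = np.random.rand(300)
dtrain = xgb.DMatrix(X, label=y)

params = {
    'tree_method': 'hist',
    # JSON-encoded list of permitted interaction groups.
    'interaction_constraints': '[[0, 1], [2, 3, 4]]',
}
bst = xgb.train(params, dtrain, num_boost_round=10)
```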
### New feature: learning to rank using scikit-learn interface
* Learning to rank task is now available for the scikit-learn interface of the Python package (#3560, #3848). It is now possible to integrate the XGBoost ranking model into the scikit-learn learning pipeline.
* An example of using the `XGBRanker` class can be found at [demo/rank/rank_sklearn.py](https://github.com/dmlc/xgboost/blob/24a268a2e3cb17302db3d72da8f04016b7d352d9/demo/rank/rank_sklearn.py).
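A minimal `XGBRanker` sketch with synthetic data; `group` gives the sizes of consecutive query groups in `X`:

```python
import numpy as np
from xgboost import XGBRanker

X = np.random.rand(10, 5)            # 10 documents, 5 features
y = np.random.randint(3, size=10)    # graded relevance labels
group = [4, 6]                       # two queries: 4 and 6 documents

ranker = XGBRanker(objective='rank:pairwise', n_estimators=10)
ranker.fit(X, y, group=group)
scores = ranker.predict(X)           # higher score ranks higher within a query
```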
### New feature: R interface for SHAP interactions
* SHAP (SHapley Additive exPlanations) is a unified approach to explain the output of any machine learning model. Previously, this feature was only available from the Python package; now it is available from the R package as well (#3636).
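For comparison, the Python equivalents (available before this release) look roughly like this, with synthetic data:

```python
import numpy as np
import xgboost as xgb

dtrain = xgb.DMatrix(np.random.rand(100, 4), label=np.random.rand(100))
bst = xgb.train({'max_depth': 3}, dtrain, num_boost_round=10)

# Per-feature SHAP contributions: shape (n_samples, n_features + 1);
# the last column is the bias term.
contribs = bst.predict(dtrain, pred_contribs=True)

# Pairwise SHAP interactions:
# shape (n_samples, n_features + 1, n_features + 1).
interactions = bst.predict(dtrain, pred_interactions=True)
```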
### New feature: GPU predictor now use multiple GPUs to predict
* GPU predictor is now able to utilize multiple GPUs at once to accelerate prediction (#3738)
### New feature: Scale distributed XGBoost to large-scale clusters
* Fix OS file descriptor limit assertion error on large cluster (#3835, dmlc/rabit#73) by replacing `select()` based AllReduce/Broadcast with `poll()` based implementation.
* Mitigate tracker "thundering herd" issue on large cluster. Add exponential backoff retry when workers connect to tracker.
* With this change, we were able to scale to 1.5k executors on a 12 billion row dataset after some tweaks here and there.
### New feature: Additional objective functions for GPUs
* New objective functions ported to GPU: `hinge`, `multi:softmax`, `multi:softprob`, `count:poisson`, `reg:gamma`, `reg:tweedie`.
* With supported objectives, XGBoost will select the correct devices based on your system and `n_gpus` parameter.
### Major bug fix: learning to rank with XGBoost4J-Spark
* Previously, `repartitionForData` would shuffle data and lose ordering necessary for ranking task.
* To fix this issue, data points within each RDD partition are explicitly grouped by their group (query session) IDs (#3654). Empty RDD partitions are also handled carefully (#3750).
### Major bug fix: early stopping fixed in XGBoost4J-Spark
* The earlier implementation of early stopping had incorrect semantics and didn't let users specify the direction of optimization (maximize / minimize)
* A parameter `maximize_evaluation_metrics` is defined so as to tell whether a metric should be maximized or minimized as part of early stopping criteria (#3808). Also early stopping now has correct semantics.
### API changes
* Column sampling by level (`colsample_bylevel`) is now functional for `hist` algorithm (#3635, #3862)
* The GPU tag `gpu:` for regression objectives is now deprecated. XGBoost will select the correct devices automatically (#3643)
* Add `disable_default_eval_metric` parameter to disable default metric (#3606)
* Experimental AVX support for gradient computation is removed (#3752)
* XGBoost4J-Spark
- Add `rank:ndcg` and `rank:map` to supported objectives (#3697)
* Python package
- Add `callbacks` argument to `fit()` function of scikit-learn API (#3682)
- Add `XGBRanker` to scikit-learn interface (#3560, #3848)
- Add `validate_features` argument to `predict()` function of scikit-learn API (#3653)
- Allow scikit-learn grid search over parameters specified as keyword arguments (#3791); see the sketch after this list
- Add `coef_` and `intercept_` as properties of scikit-learn wrapper (#3855). Some scikit-learn functions expect these properties.
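A sketch of the grid-search item above, assuming a small synthetic problem and an illustrative parameter grid:

```python
import numpy as np
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier

X = np.random.rand(200, 5)
y = np.random.randint(2, size=200)

# Keyword-argument parameters are visible to scikit-learn's grid search (#3791).
search = GridSearchCV(
    XGBClassifier(n_estimators=10),
    param_grid={'max_depth': [2, 4], 'learning_rate': [0.1, 0.3]},
    cv=3,
)
search.fit(X, y)
print(search.best_params_)
```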
### Performance improvements
* Address very high GPU memory usage for large data (#3635)
* Fix performance regression within `EvaluateSplits()` of `gpu_hist` algorithm. (#3680)
### Bug-fixes
* Fix a problem in GPU quantile sketch with tiny instance weights. (#3628)
* Fix copy constructor for `HostDeviceVectorImpl` to prevent dangling pointers (#3657)
* Fix a bug in partitioned file loading (#3673)
* Fix an uninitialized pointer in `gpu_hist` (#3703)
* Re-share data among GPUs when the number of GPUs changes (#3721)
* Add back `max_delta_step` to split evaluation (#3668)
* Do not round up integer thresholds for integer features in JSON dump (#3717)
* Use `dmlc::TemporaryDirectory` to handle temporaries in cross-platform way (#3783)
* Fix accuracy problem with `gpu_hist` when `min_child_weight` and `lambda` are set to 0 (#3793)
* Make sure that `tree_method` parameter is recognized and not silently ignored (#3849)
* XGBoost4J-Spark
- Make sure `thresholds` are considered when executing `predict()` method (#3577)
- Avoid losing precision when computing probabilities by converting to `Double` early (#3576)
- `getTreeLimit()` should return `Int` (#3602)
- Fix checkpoint serialization on HDFS (#3614)
- Throw `ControlThrowable` instead of `InterruptedException` so that it is properly re-thrown (#3632)
- Remove extraneous output to stdout (#3665)
- Allow specification of task type for custom objectives and evaluations (#3646)
- Fix distributed updater check (#3739)
- Fix issue when spark job execution thread cannot return before we execute `first()` (#3758)
* Python package
- Fix accessing `DMatrix.handle` before it is set (#3599)
- `XGBClassifier.predict()` should return margin scores when `output_margin` is set to true (#3651)
- Early stopping callback should maximize metric of form `NDCG@n-` (#3685)
- Preserve feature names when slicing `DMatrix` (#3766); see the sketch after this list
* R package
- Replace `nround` with `nrounds` to match actual parameter (#3592)
- Amend `xgb.createFolds` to handle classes of a single element (#3630)
- Fix buggy random generator and make `colsample_bytree` functional (#3781)
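The slicing fix (#3766) in a small sketch; the feature names are illustrative:

```python
import numpy as np
import xgboost as xgb

dmat = xgb.DMatrix(np.random.rand(10, 3),
                   feature_names=['f_a', 'f_b', 'f_c'])

first_half = dmat.slice(list(range(5)))   # keep the first five rows
print(first_half.feature_names)           # names now survive the slice
```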
### Maintenance: testing, continuous integration, build system
* Add sanitizers tests to Travis CI (#3557)
* Add NumPy, Matplotlib, Graphviz as requirements for doc build (#3669)
* Comply with CRAN submission policy (#3660, #3728)
* Remove copy-paste error in JVM test suite (#3692)
* Disable flaky tests in `R-package/tests/testthat/test_update.R` (#3723)
* Make Python tests compatible with scikit-learn 0.20 release (#3731)
* Separate out restricted and unrestricted tasks, so that pull requests don't build downloadable artifacts (#3736)
* Add multi-GPU unit test environment (#3741)
* Allow plug-ins to be built by CMake (#3752)
* Test wheel compatibility on CPU containers for pull requests (#3762)
* Fix broken doc build due to Matplotlib 3.0 release (#3764)
* Produce `xgboost.so` for XGBoost-R on Mac OS X, so that `make install` works (#3767)
* Retry Jenkins CI tests up to 3 times to improve reliability (#3769, #3775, #3776, #3777)
* Add basic unit tests for `gpu_hist` algorithm (#3785)
* Fix Python environment for distributed unit tests (#3806)
* Test wheels on CUDA 10.0 container for compatibility (#3838)
* Fix JVM doc build (#3853)
### Maintenance: Refactor C++ code for legibility and maintainability
* Merge generic device helper functions into `GPUSet` class (#3626)
* Re-factor column sampling logic into `ColumnSampler` class (#3635, #3637)
* Replace `std::vector` with `HostDeviceVector` in `MetaInfo` and `SparsePage` (#3446)
* Simplify `DMatrix` class (#3395)
* De-duplicate CPU/GPU code using `Transform` class (#3643, #3751)
* Remove obsoleted `QuantileHistMaker` class (#3761)
* Remove obsoleted `NoConstraint` class (#3792)
### Other Features
* C++20-compliant Span class for safe pointer indexing (#3548, #3588)
* Add helper functions to manipulate multiple GPU devices (#3693)
* XGBoost4J-Spark
- Allow specifying the host IP from the `xgboost-tracker.properties` file (#3833). This comes in handy when the `hosts` file doesn't correctly define localhost.
### Usability Improvements
* Add reference to GitHub repository in `pom.xml` of JVM packages (#3589)
* Add R demo of multi-class classification (#3695)
* Document JSON dump functionality (#3600, #3603); see the sketch at the end of this list
* Document CUDA requirement and lack of external memory for GPU algorithms (#3624)
* Document LambdaMART objectives, both pairwise and listwise (#3672)
* Document `aucpr` evaluation metric (#3687)
* Document gblinear parameters: `feature_selector` and `top_k` (#3780)
* Add instructions for using MinGW-built XGBoost with Python (#3774)
* Remove nonexistent parameter `use_buffer` from documentation (#3610)
* Update Python API doc to include all classes and members (#3619, #3682)
* Fix typos and broken links in documentation (#3618, #3640, #3676, #3713, #3759, #3784, #3843, #3852)
* Binary classification demo should produce LIBSVM with 0-based indexing (#3652)
* Process data once for Python and CLI examples of learning to rank (#3666)
* Include full text of Apache 2.0 license in the repository (#3698)
* Save predictor parameters in model file (#3856)
* JVM packages
- Let users specify feature names when calling `getModelDump` and `getFeatureScore` (#3733)
- Warn the user about the lack of over-the-wire encryption (#3667)
- Fix errors in examples (#3719)
- Document choice of trackers (#3831)
- Document that vanilla Apache Spark is required (#3854)
* Python package
- Document that a custom objective can't contain a colon (:) (#3601)
- Show a better error message for failed library loading (#3690)
- Document that feature importance is unavailable for non-tree learners (#3765)
- Document behavior of `get_fscore()` for zero-importance features (#3763)
- Recommend pickling as the way to save `XGBClassifier` / `XGBRegressor` / `XGBRanker` (#3829); see the sketch at the end of this list
* R package
- Enlarge variable importance plot to make it more visible (#3820)
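The following sketch ties together two of the items above: inspecting a model via the JSON dump and saving a scikit-learn wrapper with pickle. The dataset choice is illustrative:

```python
import pickle
import xgboost as xgb
from sklearn.datasets import load_breast_cancer

X, y = load_breast_cancer(return_X_y=True)
clf = xgb.XGBClassifier(n_estimators=5).fit(X, y)

# JSON dump of the tree structure -- for inspection, not for re-loading
json_trees = clf.get_booster().get_dump(dump_format='json')
print(json_trees[0])

# pickling preserves the Python-side attributes of the scikit-learn wrapper
with open('model.pkl', 'wb') as f:
    pickle.dump(clf, f)
```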
### BREAKING CHANGES
* External memory page files have changed, breaking backwards compatibility for temporary storage used during external memory training. This affects only external memory users upgrading their XGBoost version; we recommend clearing all `*.page` files before resuming training (a sketch follows below). Model serialization is unaffected.
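A sketch of the recommended cleanup; the glob pattern is illustrative and should point at wherever your external-memory cache files live:

```python
import glob
import os

# clear stale external-memory page files before resuming training
for cache_file in glob.glob('*.page'):
    os.remove(cache_file)
```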
### Known issues
* Quantile sketcher fails to produce any quantile for some edge cases (#2943)
* The `hist` algorithm leaks memory when used with learning rate decay callback (#3579)
* Using a custom evaluation function together with early stopping causes an assertion failure in XGBoost4J-Spark (#3595)
* Early stopping doesn't work with `gblinear` learner (#3789)
* Label and weight vectors are not re-shared upon a change in the number of GPUs (#3794). To work around this issue, delete the `DMatrix` object and re-create it; see the sketch after this list.
* The `DMatrix` Python objects are initialized with incorrect values when given array slices (#3841)
* The `gpu_id` parameter is broken and not yet properly supported (#3850)
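For the resharing issue above (#3794), the workaround amounts to re-creating the `DMatrix`; a sketch, assuming `X`, `y`, and `w` are still in scope:

```python
# rebuild the DMatrix after changing the number of GPUs
del dtrain
dtrain = xgb.DMatrix(X, label=y, weight=w)
```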
### Acknowledgement
**Contributors** (in no particular order): Hyunsu Cho (@hcho3), Jiaming Yuan (@trivialfis), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Andy Adinets (@canonizer), Vadim Khotilovich (@khotilov), Sergei Lebedev (@superbobry)
**First-time Contributors** (in no particular order): Matthew Tovbin (@tovbinm), Jakob Richter (@jakob-r), Grace Lam (@grace-lam), Grant W Schneider (@grantschneider), Andrew Thia (@BlueTea88), Sergei Chipiga (@schipiga), Joseph Bradley (@jkbradley), Chen Qin (@chenqin), Jerry Lin (@linjer), Dmitriy Rybalko (@rdtft), Michael Mui (@mmui), Takahiro Kojima (@515hikaru), Bruce Zhao (@BruceZhaoR), Wei Tian (@weitian), Saumya Bhatnagar (@Sam1301), Juzer Shakir (@JuzerShakir), Zhao Hang (@cleghom), Jonathan Friedman (@jontonsoup), Bruno Tremblay (@meztez), Boris Filippov (@frenzykryger), @Shiki-H, @mrgutkun, @gorogm, @htgeis, @jakehoare, @zengxy, @KOLANICH
**First-time Reviewers** (in no particular order): Nikita Titov (@StrikerRUS), Xiangrui Meng (@mengxr), Nirmal Borah (@Nirmal-Neel)
## v0.80 (2018.08.13)
* **JVM packages received a major upgrade**: To consolidate the APIs and improve the user experience, we refactored the design of XGBoost4J-Spark in a significant manner (#3387).
- Consolidated APIs: It is now much easier to integrate XGBoost models into a Spark ML pipeline. Users can control behaviors such as outputting leaf prediction results by setting the corresponding column names. Training is now more consistent with other Estimators in Spark MLlib: there is now a single method `fit()` to train decision trees.
@@ -338,7 +13,7 @@ This release is packed with many new features and bug fixes.
- Latest master: https://xgboost.readthedocs.io/en/latest
- 0.80 stable: https://xgboost.readthedocs.io/en/release_0.80
- 0.72 stable: https://xgboost.readthedocs.io/en/release_0.72
* Support for per-group weights in ranking objective (#3379)
* Ranking task now uses instance weights (#3379)
* Fix inaccurate decimal parsing (#3546)
* New functionality
- Query ID column support in LIBSVM data files (#2749). This is convenient for performing ranking task in distributed setting.
@@ -498,7 +173,7 @@ This version is only applicable for the Python package. The content is identical
- Compatibility fix for Python 2.6
- Call `print_evaluation` callback at last iteration
- Use appropriate integer types when calling native code, to prevent truncation and memory error
- Fix shared library loading on Mac OS X
- Fix shared library loading on Mac OS X
* R package:
- New parameters:
- `silent` in `xgb.DMatrix()`
@@ -539,7 +214,7 @@ This version is only applicable for the Python package. The content is identical
- Support instance weights
- Use `SparkParallelismTracker` to prevent jobs from hanging forever
- Expose train-time evaluation metrics via `XGBoostModel.summary`
- Option to specify `host-ip` explicitly in the Rabit tracker
- Option to specify `host-ip` explicitly in the Rabit tracker
* Documentation
- Better math notation for gradient boosting
- Updated build instructions for Mac OS X

View File

@@ -1,7 +1,7 @@
Package: xgboost
Type: Package
Title: Extreme Gradient Boosting
Version: 0.81.0.1
Version: 0.80.1
Date: 2018-08-13
Authors@R: c(
person("Tianqi", "Chen", role = c("aut"),
@@ -61,5 +61,5 @@ Imports:
data.table (>= 1.9.6),
magrittr (>= 1.5),
stringi (>= 0.5.2)
RoxygenNote: 6.1.0
RoxygenNote: 6.0.1
SystemRequirements: GNU make, C++11

View File

@@ -74,19 +74,6 @@ check.booster.params <- function(params, ...) {
params[['monotone_constraints']] = vec2str
}
# interaction constraints parser (convert from list of column indices to string)
if (!is.null(params[['interaction_constraints']]) &&
typeof(params[['interaction_constraints']]) != "character"){
# check input class
if (class(params[['interaction_constraints']]) != 'list') stop('interaction_constraints should be class list')
if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric','integer'))) {
stop('interaction_constraints should be a list of numeric/integer vectors')
}
# recast parameter as string
interaction_constraints <- sapply(params[['interaction_constraints']], function(x) paste0('[', paste(x, collapse=','), ']'))
params[['interaction_constraints']] <- paste0('[', paste(interaction_constraints, collapse=','), ']')
}
return(params)
}
@@ -275,8 +262,7 @@ xgb.createFolds <- function(y, k = 10)
## add enough random integers to get length(seqVector) == numInClass[i]
if (numInClass[i] %% k > 0) seqVector <- c(seqVector, sample.int(k, numInClass[i] %% k))
## shuffle the integers for fold assignment and assign to this classes's data
## seqVector[sample.int(length(seqVector))] is used to handle length(seqVector) == 1
foldVector[y == dimnames(numInClass)$y[i]] <- seqVector[sample.int(length(seqVector))]
foldVector[y == dimnames(numInClass)$y[i]] <- sample(seqVector)
}
} else {
foldVector <- seq(along = y)

View File

@@ -129,13 +129,11 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' logistic regression would result in predictions for log-odds instead of probabilities.
#' @param ntreelimit limit the number of model's trees or boosting iterations used in prediction (see Details).
#' It will use all the trees by default (\code{NULL} value).
#' @param predleaf whether predict leaf index.
#' @param predcontrib whether to return feature contributions to individual predictions (see Details).
#' @param predleaf whether predict leaf index instead.
#' @param predcontrib whether to return feature contributions to individual predictions instead (see Details).
#' @param approxcontrib whether to use a fast approximation for feature contributions (see Details).
#' @param predinteraction whether to return contributions of feature interactions to individual predictions (see Details).
#' @param reshape whether to reshape the vector of predictions to a matrix form when there are several
#' prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
#' or predinteraction flags is TRUE.
#' prediction outputs per case. This option has no effect when \code{predleaf = TRUE}.
#' @param ... Parameters passed to \code{predict.xgb.Booster}
#'
#' @details
@@ -160,11 +158,6 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' Setting \code{approxcontrib = TRUE} approximates these values following the idea explained
#' in \url{http://blog.datadive.net/interpreting-random-forests/}.
#'
#' With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
#' are computed. Note that this operation might be rather expensive in terms of compute and memory.
#' Since it quadratically depends on the number of features, it is recommended to perfom selection
#' of the most important features first. See below about the format of the returned results.
#'
#' @return
#' For regression or binary classification, it returns a vector of length \code{nrows(newdata)}.
#' For multiclass classification, either a \code{num_class * nrows(newdata)} vector or
@@ -180,14 +173,6 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' such a matrix. The contribution values are on the scale of untransformed margin
#' (e.g., for binary classification would mean that the contributions are log-odds deviations from bias).
#'
#' When \code{predinteraction = TRUE} and it is not a multiclass setting, the output is a 3d array with
#' dimensions \code{c(nrow, num_features + 1, num_features + 1)}. The off-diagonal (in the last two dimensions)
#' elements represent different features interaction contributions. The array is symmetric WRT the last
#' two dimensions. The "+ 1" columns corresponds to bias. Summing this array along the last dimension should
#' produce practically the same result as predict with \code{predcontrib = TRUE}.
#' For a multiclass case, a list of \code{num_class} elements is returned, where each element is
#' such an array.
#'
#' @seealso
#' \code{\link{xgb.train}}.
#'
@@ -284,8 +269,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' @rdname predict.xgb.Booster
#' @export
predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FALSE, ntreelimit = NULL,
predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
reshape = FALSE, ...) {
predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, reshape = FALSE, ...) {
object <- xgb.Booster.complete(object, saveraw = FALSE)
if (!inherits(newdata, "xgb.DMatrix"))
@@ -301,8 +285,7 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
if (ntreelimit < 0)
stop("ntreelimit cannot be negative")
option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + 4L * as.logical(predcontrib) +
8L * as.logical(approxcontrib) + 16L * as.logical(predinteraction)
option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + 4L * as.logical(predcontrib) + 8L * as.logical(approxcontrib)
ret <- .Call(XGBoosterPredict_R, object$handle, newdata, option[1], as.integer(ntreelimit))
@@ -322,28 +305,17 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
} else if (predcontrib) {
n_col1 <- ncol(newdata) + 1
n_group <- npred_per_case / n_col1
cnames <- if (!is.null(colnames(newdata))) c(colnames(newdata), "BIAS") else NULL
dnames <- if (!is.null(colnames(newdata))) list(NULL, c(colnames(newdata), "BIAS")) else NULL
ret <- if (n_ret == n_row) {
matrix(ret, ncol = 1, dimnames = list(NULL, cnames))
matrix(ret, ncol = 1, dimnames = dnames)
} else if (n_group == 1) {
matrix(ret, nrow = n_row, byrow = TRUE, dimnames = list(NULL, cnames))
matrix(ret, nrow = n_row, byrow = TRUE, dimnames = dnames)
} else {
arr <- array(ret, c(n_col1, n_group, n_row),
dimnames = list(cnames, NULL, NULL)) %>% aperm(c(2,3,1)) # [group, row, col]
lapply(seq_len(n_group), function(g) arr[g,,])
}
} else if (predinteraction) {
n_col1 <- ncol(newdata) + 1
n_group <- npred_per_case / n_col1^2
cnames <- if (!is.null(colnames(newdata))) c(colnames(newdata), "BIAS") else NULL
ret <- if (n_ret == n_row) {
matrix(ret, ncol = 1, dimnames = list(NULL, cnames))
} else if (n_group == 1) {
array(ret, c(n_col1, n_col1, n_row), dimnames = list(cnames, cnames, NULL)) %>% aperm(c(3,1,2))
} else {
arr <- array(ret, c(n_col1, n_col1, n_group, n_row),
dimnames = list(cnames, cnames, NULL, NULL)) %>% aperm(c(3,4,1,2)) # [group, row, col1, col2]
lapply(seq_len(n_group), function(g) arr[g,,,])
grp_mask <- rep(seq_len(n_col1), n_row) +
rep((seq_len(n_row) - 1) * n_col1 * n_group, each = n_col1)
lapply(seq_len(n_group), function(g) {
matrix(ret[grp_mask + n_col1 * (g - 1)], nrow = n_row, byrow = TRUE, dimnames = dnames)
})
}
} else if (reshape && npred_per_case > 1) {
ret <- matrix(ret, nrow = n_row, byrow = TRUE)

View File

@@ -22,7 +22,7 @@ xgb.ggplot.importance <- function(importance_matrix = NULL, top_n = NULL, measur
plot <-
ggplot2::ggplot(importance_matrix,
ggplot2::aes(x = factor(Feature, levels = rev(Feature)), y = Importance, width = 0.5),
ggplot2::aes(x = factor(Feature, levels = rev(Feature)), y = Importance, width = 0.05),
environment = environment()) +
ggplot2::geom_bar(ggplot2::aes(fill = Cluster), stat = "identity", position = "identity") +
ggplot2::coord_flip() +

View File

@@ -27,7 +27,7 @@
#' a tree's median absolute leaf weight changes through the iterations.
#'
#' This function was inspired by the blog post
#' \url{https://github.com/aysent/random-forest-leaf-visualization}.
#' \url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}.
#'
#' @return
#'

View File

@@ -26,7 +26,6 @@
#' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
#' \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
#' \item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
#' \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
#' }
#'
#' 2.2. Parameter for Linear Booster

R-package/configure (vendored)
View File

@@ -1667,12 +1667,12 @@ OPENMP_CXXFLAGS=""
if test `uname -s` = "Linux"
then
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
fi
if test `uname -s` = "Darwin"
then
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
ac_pkg_openmp=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether OpenMP will work in a package" >&5
$as_echo_n "checking whether OpenMP will work in a package... " >&6; }

View File

@@ -8,12 +8,12 @@ OPENMP_CXXFLAGS=""
if test `uname -s` = "Linux"
then
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
fi
if test `uname -s` = "Darwin"
then
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
ac_pkg_openmp=no
AC_MSG_CHECKING([whether OpenMP will work in a package])
AC_LANG_CONFTEST(

View File

@@ -11,5 +11,4 @@ early_stopping Early Stop in training
poisson_regression Poisson Regression on count data
tweedie_regression Tweddie Regression
gpu_accelerated GPU-accelerated tree building algorithms
interaction_constraints Interaction constraints among features

View File

@@ -33,7 +33,7 @@ evalerror <- function(preds, dtrain) {
return(list(metric = "error", value = err))
}
param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
param <- list(max_depth=2, eta=1, nthread = 2, silent=1,
objective=logregobj, eval_metric=evalerror)
print ('start training with user customized objective')
# training with customized objective, we can also do step by step training
@@ -57,7 +57,7 @@ logregobjattr <- function(preds, dtrain) {
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
param <- list(max_depth=2, eta=1, nthread = 2, silent=1,
objective=logregobjattr, eval_metric=evalerror)
print ('start training with user customized objective, with additional attributes in DMatrix')
# training with customized objective, we can also do step by step training

View File

@@ -7,7 +7,7 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# note: for customized objective function, we leave objective as default
# note: what we are getting is margin value in prediction
# you must know what you are doing
param <- list(max_depth=2, eta=1, nthread=2, verbosity=0)
param <- list(max_depth=2, eta=1, nthread = 2, silent=1)
watchlist <- list(eval = dtest)
num_round <- 20
# user define objective function, given prediction, return gradient and second order gradient
@@ -32,9 +32,9 @@ evalerror <- function(preds, dtrain) {
}
print ('start training with early Stopping setting')
bst <- xgb.train(param, dtrain, num_round, watchlist,
bst <- xgb.train(param, dtrain, num_round, watchlist,
objective = logregobj, eval_metric = evalerror, maximize = FALSE,
early_stopping_round = 3)
bst <- xgb.cv(param, dtrain, num_round, nfold = 5,
bst <- xgb.cv(param, dtrain, num_round, nfold = 5,
objective = logregobj, eval_metric = evalerror,
maximize = FALSE, early_stopping_rounds = 3)

View File

@@ -1,105 +0,0 @@
library(xgboost)
library(data.table)
set.seed(1024)
# Function to obtain a list of interactions fitted in trees, requires input of maximum depth
treeInteractions <- function(input_tree, input_max_depth){
trees <- copy(input_tree) # copy tree input to prevent overwriting
if (input_max_depth < 2) return(list()) # no interactions if max depth < 2
if (nrow(input_tree) == 1) return(list())
# Attach parent nodes
for (i in 2:input_max_depth){
if (i == 2) trees[, ID_merge:=ID] else trees[, ID_merge:=get(paste0('parent_',i-2))]
parents_left <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=Yes)]
parents_right <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=No)]
setorderv(trees, 'ID_merge')
setorderv(parents_left, 'ID_merge')
setorderv(parents_right, 'ID_merge')
trees <- merge(trees, parents_left, by='ID_merge', all.x=T)
trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
trees[, c('i.id','i.feature'):=NULL]
trees <- merge(trees, parents_right, by='ID_merge', all.x=T)
trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
trees[, c('i.id','i.feature'):=NULL]
}
# Extract nodes with interactions
interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
c('Feature',paste0('parent_feat_',1:(input_max_depth-1))), with=F]
interaction_trees_split <- split(interaction_trees, 1:nrow(interaction_trees))
interaction_list <- lapply(interaction_trees_split, as.character)
# Remove NAs (no parent interaction)
interaction_list <- lapply(interaction_list, function(x) x[!is.na(x)])
# Remove non-interactions (same variable)
interaction_list <- lapply(interaction_list, unique) # remove same variables
interaction_length <- sapply(interaction_list, length)
interaction_list <- interaction_list[interaction_length > 1]
interaction_list <- unique(lapply(interaction_list, sort))
return(interaction_list)
}
# Generate sample data
x <- list()
for (i in 1:10){
x[[i]] = i*rnorm(1000, 10)
}
x <- as.data.table(x)
y = -1*x[, rowSums(.SD)] + x[['V1']]*x[['V2']] + x[['V3']]*x[['V4']]*x[['V5']] + rnorm(1000, 0.001) + 3*sin(x[['V7']])
train = as.matrix(x)
# Interaction constraint list (column names form)
interaction_list <- list(c('V1','V2'),c('V3','V4','V5'))
# Convert interaction constraint list into feature index form
cols2ids <- function(object, col_names) {
LUT <- seq_along(col_names) - 1
names(LUT) <- col_names
rapply(object, function(x) LUT[x], classes="character", how="replace")
}
interaction_list_fid = cols2ids(interaction_list, colnames(train))
# Fit model with interaction constraints
bst = xgboost(data = train, label = y, max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000,
interaction_constraints = interaction_list_fid)
bst_tree <- xgb.model.dt.tree(colnames(train), bst)
bst_interactions <- treeInteractions(bst_tree, 4) # interactions constrained to combinations of V1*V2 and V3*V4*V5
# Fit model without interaction constraints
bst2 = xgboost(data = train, label = y, max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000)
bst2_tree <- xgb.model.dt.tree(colnames(train), bst2)
bst2_interactions <- treeInteractions(bst2_tree, 4) # much more interactions
# Fit model with both interaction and monotonicity constraints
bst3 = xgboost(data = train, label = y, max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000,
interaction_constraints = interaction_list_fid,
monotone_constraints = c(-1,0,0,0,0,0,0,0,0,0))
bst3_tree <- xgb.model.dt.tree(colnames(train), bst3)
bst3_interactions <- treeInteractions(bst3_tree, 4) # interactions still constrained to combinations of V1*V2 and V3*V4*V5
# Show monotonic constraints still apply by checking scores after incrementing V1
x1 <- sort(unique(x[['V1']]))
for (i in 1:length(x1)){
testdata <- copy(x[, -c('V1')])
testdata[['V1']] <- x1[i]
testdata <- testdata[, paste0('V',1:10), with=F]
pred <- predict(bst3, as.matrix(testdata))
# Should not print out anything due to monotonic constraints
if (i > 1) if (any(pred > prev_pred)) print(i)
prev_pred <- pred
}

View File

@@ -7,8 +7,7 @@
\usage{
\method{predict}{xgb.Booster}(object, newdata, missing = NA,
outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE,
predcontrib = FALSE, approxcontrib = FALSE,
predinteraction = FALSE, reshape = FALSE, ...)
predcontrib = FALSE, approxcontrib = FALSE, reshape = FALSE, ...)
\method{predict}{xgb.Booster.handle}(object, ...)
}
@@ -27,17 +26,14 @@ logistic regression would result in predictions for log-odds instead of probabil
\item{ntreelimit}{limit the number of model's trees or boosting iterations used in prediction (see Details).
It will use all the trees by default (\code{NULL} value).}
\item{predleaf}{whether predict leaf index.}
\item{predleaf}{whether predict leaf index instead.}
\item{predcontrib}{whether to return feature contributions to individual predictions (see Details).}
\item{predcontrib}{whether to return feature contributions to individual predictions instead (see Details).}
\item{approxcontrib}{whether to use a fast approximation for feature contributions (see Details).}
\item{predinteraction}{whether to return contributions of feature interactions to individual predictions (see Details).}
\item{reshape}{whether to reshape the vector of predictions to a matrix form when there are several
prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
or predinteraction flags is TRUE.}
prediction outputs per case. This option has no effect when \code{predleaf = TRUE}.}
\item{...}{Parameters passed to \code{predict.xgb.Booster}}
}
@@ -55,14 +51,6 @@ When \code{predcontrib = TRUE} and it is not a multiclass setting, the output is
For a multiclass case, a list of \code{num_class} elements is returned, where each element is
such a matrix. The contribution values are on the scale of untransformed margin
(e.g., for binary classification would mean that the contributions are log-odds deviations from bias).
When \code{predinteraction = TRUE} and it is not a multiclass setting, the output is a 3d array with
dimensions \code{c(nrow, num_features + 1, num_features + 1)}. The off-diagonal (in the last two dimensions)
elements represent different features interaction contributions. The array is symmetric WRT the last
two dimensions. The "+ 1" columns corresponds to bias. Summing this array along the last dimension should
produce practically the same result as predict with \code{predcontrib = TRUE}.
For a multiclass case, a list of \code{num_class} elements is returned, where each element is
such an array.
}
\description{
Predicted values based on either xgboost model or model handle object.
@@ -88,11 +76,6 @@ values (Lundberg 2017) that sum to the difference between the expected output
of the model and the current prediction (where the hessian weights are used to compute the expectations).
Setting \code{approxcontrib = TRUE} approximates these values following the idea explained
in \url{http://blog.datadive.net/interpreting-random-forests/}.
With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
are computed. Note that this operation might be rather expensive in terms of compute and memory.
Since it quadratically depends on the number of features, it is recommended to perfom selection
of the most important features first. See below about the format of the returned results.
}
\examples{
## binary classification:

View File

@@ -4,12 +4,11 @@
\alias{xgb.cv}
\title{Cross Validation}
\usage{
xgb.cv(params = list(), data, nrounds, nfold, label = NULL,
missing = NA, prediction = FALSE, showsd = TRUE,
metrics = list(), obj = NULL, feval = NULL, stratified = TRUE,
folds = NULL, verbose = TRUE, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(),
...)
xgb.cv(params = list(), data, nrounds, nfold, label = NULL, missing = NA,
prediction = FALSE, showsd = TRUE, metrics = list(), obj = NULL,
feval = NULL, stratified = TRUE, folds = NULL, verbose = TRUE,
print_every_n = 1L, early_stopping_rounds = NULL, maximize = NULL,
callbacks = list(), ...)
}
\arguments{
\item{params}{the list of parameters. Commonly used ones are:

View File

@@ -44,8 +44,8 @@ test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
# save the model in file 'xgb.model.dump'
dump_path = file.path(tempdir(), 'model.dump')
xgb.dump(bst, dump_path, with_stats = TRUE)
dump.path = file.path(tempdir(), 'model.dump')
xgb.dump(bst, dump.path, with_stats = TRUE)
# print the model without saving it to a file
print(xgb.dump(bst, with_stats = TRUE))

View File

@@ -5,11 +5,11 @@
\alias{xgb.plot.deepness}
\title{Plot model trees deepness}
\usage{
xgb.ggplot.deepness(model = NULL, which = c("2x1", "max.depth",
"med.depth", "med.weight"))
xgb.ggplot.deepness(model = NULL, which = c("2x1", "max.depth", "med.depth",
"med.weight"))
xgb.plot.deepness(model = NULL, which = c("2x1", "max.depth",
"med.depth", "med.weight"), plot = TRUE, ...)
xgb.plot.deepness(model = NULL, which = c("2x1", "max.depth", "med.depth",
"med.weight"), plot = TRUE, ...)
}
\arguments{
\item{model}{either an \code{xgb.Booster} model generated by the \code{xgb.train} function
@@ -50,7 +50,7 @@ per tree with respect to tree number are created. And \code{which="med.weight"}
a tree's median absolute leaf weight changes through the iterations.
This function was inspired by the blog post
\url{https://github.com/aysent/random-forest-leaf-visualization}.
\url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}.
}
\examples{

View File

@@ -9,8 +9,8 @@ xgb.ggplot.importance(importance_matrix = NULL, top_n = NULL,
measure = NULL, rel_to_first = FALSE, n_clusters = c(1:10), ...)
xgb.plot.importance(importance_matrix = NULL, top_n = NULL,
measure = NULL, rel_to_first = FALSE, left_margin = 10,
cex = NULL, plot = TRUE, ...)
measure = NULL, rel_to_first = FALSE, left_margin = 10, cex = NULL,
plot = TRUE, ...)
}
\arguments{
\item{importance_matrix}{a \code{data.table} returned by \code{\link{xgb.importance}}.}

View File

@@ -6,8 +6,8 @@
\usage{
xgb.plot.shap(data, shap_contrib = NULL, features = NULL, top_n = 1,
model = NULL, trees = NULL, target_class = NULL,
approxcontrib = FALSE, subsample = NULL, n_col = 1, col = rgb(0,
0, 1, 0.2), pch = ".", discrete_n_uniq = 5, discrete_jitter = 0.01,
approxcontrib = FALSE, subsample = NULL, n_col = 1, col = rgb(0, 0, 1,
0.2), pch = ".", discrete_n_uniq = 5, discrete_jitter = 0.01,
ylab = "SHAP", plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6),
pch_NA = ".", pos_NA = 1.07, plot_loess = TRUE, col_loess = 2,
span_loess = 0.5, which = c("1d", "2d"), plot = TRUE, ...)

View File

@@ -5,17 +5,15 @@
\alias{xgboost}
\title{eXtreme Gradient Boosting Training}
\usage{
xgb.train(params = list(), data, nrounds, watchlist = list(),
obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L,
xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL,
feval = NULL, verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(),
...)
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(), ...)
xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
params = list(), nrounds, verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(),
...)
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(), ...)
}
\arguments{
\item{params}{the list of parameters.

View File

@@ -17,8 +17,8 @@ endif
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ -pthread
PKG_LIBS = @OPENMP_CXXFLAGS@ -pthread
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = @OPENMP_CXXFLAGS@ $(SHLIB_PTHREAD_FLAGS)
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o

View File

@@ -29,8 +29,8 @@ endif
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
PKG_CXXFLAGS= $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_CXXFLAGS= $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o

View File

@@ -1,5 +1,5 @@
/* Copyright (c) 2015 by Contributors
*
*
* This file was initially generated using the following R command:
* tools::package_native_routine_registration_skeleton('.', con = 'src/init.c', character_only = F)
* and edited to conform to xgboost C linter requirements. For details, see
@@ -10,7 +10,7 @@
#include <stdlib.h>
#include <R_ext/Rdynload.h>
/* FIXME:
/* FIXME:
Check these declarations against the C/Fortran source code.
*/
@@ -70,7 +70,7 @@ static const R_CallMethodDef CallEntries[] = {
#if defined(_WIN32)
__declspec(dllexport)
#endif // defined(_WIN32)
#endif
void R_init_xgboost(DllInfo *dll) {
R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
R_useDynamicSymbols(dll, FALSE);

View File

@@ -32,10 +32,7 @@ extern "C" {
namespace xgboost {
ConsoleLogger::~ConsoleLogger() {
if (cur_verbosity_ == LogVerbosity::kIgnore ||
cur_verbosity_ <= global_verbosity_) {
dmlc::CustomLogMessage::Log(log_stream_.str());
}
dmlc::CustomLogMessage::Log(log_stream_.str());
}
TrackerLogger::~TrackerLogger() {
dmlc::CustomLogMessage::Log(log_stream_.str());
@@ -49,11 +46,10 @@ namespace common {
bool CheckNAN(double v) {
return ISNAN(v);
}
#if !defined(XGBOOST_USE_CUDA)
double LogGamma(double v) {
return lgammafn(v);
}
#endif // !defined(XGBOOST_USE_CUDA)
// customize random engine.
void CustomGlobalRandomEngine::seed(CustomGlobalRandomEngine::result_type val) {
// ignore the seed

View File

@@ -223,42 +223,3 @@ test_that("train and predict with non-strict classes", {
expect_error(pr <- predict(bst, train_dense), regexp = NA)
expect_equal(pr0, pr)
})
test_that("max_delta_step works", {
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
watchlist <- list(train = dtrain)
param <- list(objective = "binary:logistic", eval_metric="logloss", max_depth = 2, nthread = 2, eta = 0.5)
nrounds = 5
# model with no restriction on max_delta_step
bst1 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1)
# model with restricted max_delta_step
bst2 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1, max_delta_step = 1)
# the no-restriction model is expected to have consistently lower loss during the initial interations
expect_true(all(bst1$evaluation_log$train_logloss < bst2$evaluation_log$train_logloss))
expect_lt(mean(bst1$evaluation_log$train_logloss)/mean(bst2$evaluation_log$train_logloss), 0.8)
})
test_that("colsample_bytree works", {
# Randomly generate data matrix by sampling from uniform distribution [-1, 1]
set.seed(1)
train_x <- matrix(runif(1000, min = -1, max = 1), ncol = 100)
train_y <- as.numeric(rowSums(train_x) > 0)
test_x <- matrix(runif(1000, min = -1, max = 1), ncol = 100)
test_y <- as.numeric(rowSums(test_x) > 0)
colnames(train_x) <- paste0("Feature_", sprintf("%03d", 1:100))
colnames(test_x) <- paste0("Feature_", sprintf("%03d", 1:100))
dtrain <- xgb.DMatrix(train_x, label = train_y)
dtest <- xgb.DMatrix(test_x, label = test_y)
watchlist <- list(train = dtrain, eval = dtest)
# Use colsample_bytree = 0.01, so that roughly one out of 100 features is
# chosen for each tree
param <- list(max_depth = 2, eta = 0, silent = 1, nthread = 2,
colsample_bytree = 0.01, objective = "binary:logistic",
eval_metric = "auc")
set.seed(2)
bst <- xgb.train(param, dtrain, nrounds = 100, watchlist, verbose = 0)
xgb.importance(model = bst)
# If colsample_bytree works properly, a variety of features should be used
# in the 100 trees
expect_gte(nrow(xgb.importance(model = bst)), 30)
})

View File

@@ -1,38 +0,0 @@
require(xgboost)
context("interaction constraints")
set.seed(1024)
x1 <- rnorm(1000, 1)
x2 <- rnorm(1000, 1)
x3 <- sample(c(1,2,3), size=1000, replace=TRUE)
y <- x1 + x2 + x3 + x1*x2*x3 + rnorm(1000, 0.001) + 3*sin(x1)
train <- matrix(c(x1,x2,x3), ncol = 3)
test_that("interaction constraints for regression", {
# Fit a model that only allows interaction between x1 and x2
bst <- xgboost(data = train, label = y, max_depth = 3,
eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
interaction_constraints = list(c(0,1)))
# Set all observations to have the same x3 values then increment
# by the same amount
preds <- lapply(c(1,2,3), function(x){
tmat <- matrix(c(x1,x2,rep(x,1000)), ncol=3)
return(predict(bst, tmat))
})
# Check incrementing x3 has the same effect on all observations
# since x3 is constrained to be independent of x1 and x2
# and all observations start off from the same x3 value
diff1 <- preds[[2]] - preds[[1]]
test1 <- all(abs(diff1 - diff1[1]) < 1e-4)
diff2 <- preds[[3]] - preds[[2]]
test2 <- all(abs(diff2 - diff2[1]) < 1e-4)
expect_true({
test1 & test2
}, "Interaction Contraint Satisfied")
})

View File

@@ -1,141 +0,0 @@
context('Test prediction of feature interactions')
require(xgboost)
require(magrittr)
set.seed(123)
test_that("predict feature interactions works", {
# simulate some binary data and a linear outcome with an interaction term
N <- 1000
P <- 5
X <- matrix(rbinom(N * P, 1, 0.5), ncol=P, dimnames = list(NULL, letters[1:P]))
# center the data (as contributions are computed WRT feature means)
X <- scale(X, scale=FALSE)
# outcome without any interactions, without any noise:
f <- function(x) 2 * x[, 1] - 3 * x[, 2]
# outcome with interactions, without noise:
f_int <- function(x) f(x) + 2 * x[, 2] * x[, 3]
# outcome with interactions, with noise:
#f_int_noise <- function(x) f_int(x) + rnorm(N, 0, 0.3)
y <- f_int(X)
dm <- xgb.DMatrix(X, label = y)
param <- list(eta=0.1, max_depth=4, base_score=mean(y), lambda=0, nthread=2)
b <- xgb.train(param, dm, 100)
pred = predict(b, dm, outputmargin=TRUE)
# SHAP contributions:
cont <- predict(b, dm, predcontrib=TRUE)
expect_equal(dim(cont), c(N, P+1))
# make sure for each row they add up to marginal predictions
max(abs(rowSums(cont) - pred)) %>% expect_lt(0.001)
# Hand-construct the 'ground truth' feature contributions:
gt_cont <- cbind(
2. * X[, 1],
-3. * X[, 2] + 1. * X[, 2] * X[, 3], # attribute a HALF of the interaction term to feature #2
1. * X[, 2] * X[, 3] # and another HALF of the interaction term to feature #3
)
gt_cont <- cbind(gt_cont, matrix(0, nrow=N, ncol=P + 1 - 3))
# These should be relatively close:
expect_lt(max(abs(cont - gt_cont)), 0.05)
# SHAP interaction contributions:
intr <- predict(b, dm, predinteraction=TRUE)
expect_equal(dim(intr), c(N, P+1, P+1))
# check assigned colnames
cn <- c(letters[1:P], "BIAS")
expect_equal(dimnames(intr), list(NULL, cn, cn))
# check the symmetry
max(abs(aperm(intr, c(1,3,2)) - intr)) %>% expect_lt(0.00001)
# sums WRT columns must be close to feature contributions
max(abs(apply(intr, c(1,2), sum) - cont)) %>% expect_lt(0.00001)
# diagonal terms for features 3,4,5 must be close to zero
Reduce(max, sapply(3:P, function(i) max(abs(intr[, i, i])))) %>% expect_lt(0.05)
# BIAS must have no interactions
max(abs(intr[, 1:P, P+1])) %>% expect_lt(0.00001)
# interactions other than 2 x 3 must be close to zero
intr23 <- intr
intr23[,2,3] <- 0
Reduce(max, sapply(1:P, function(i) max(abs(intr23[, i, (i+1):(P+1)])))) %>% expect_lt(0.05)
# Construct the 'ground truth' contributions of interactions directly from the linear terms:
gt_intr <- array(0, c(N, P+1, P+1))
gt_intr[,2,3] <- 1. * X[, 2] * X[, 3] # attribute a HALF of the interaction term to each symmetric element
gt_intr[,3,2] <- gt_intr[, 2, 3]
# merge-in the diagonal based on 'ground truth' feature contributions
intr_diag = gt_cont - apply(gt_intr, c(1,2), sum)
for(j in seq_len(P)) {
gt_intr[,j,j] = intr_diag[,j]
}
# These should be relatively close:
expect_lt(max(abs(intr - gt_intr)), 0.1)
})
test_that("SHAP contribution values are not NAN", {
d <- data.frame(
x1 = c(-2.3, 1.4, 5.9, 2, 2.5, 0.3, -3.6, -0.2, 0.5, -2.8, -4.6, 3.3, -1.2,
-1.1, -2.3, 0.4, -1.5, -0.2, -1, 3.7),
x2 = c(291.179171, 269.198331, 289.942097, 283.191669, 269.673332,
294.158346, 287.255835, 291.530838, 285.899586, 269.290833,
268.649586, 291.530841, 280.074593, 269.484168, 293.94042,
294.327506, 296.20709, 295.441669, 283.16792, 270.227085),
y = c(9, 15, 5.7, 9.2, 22.4, 5, 9, 3.2, 7.2, 13.1, 7.8, 16.9, 6.5, 22.1,
5.3, 10.4, 11.1, 13.9, 11, 20.5),
fold = c(2, 2, 2, 1, 2, 2, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2))
ivs <- c("x1", "x2")
fit <- xgboost(
verbose = 0,
params = list(
objective = "reg:linear",
eval_metric = "rmse"),
data = as.matrix(subset(d, fold == 2)[, ivs]),
label = subset(d, fold == 2)$y,
nthread = 1,
nrounds = 3)
shaps <- as.data.frame(predict(fit,
newdata = as.matrix(subset(d, fold == 1)[, ivs]),
predcontrib = T))
result <- cbind(shaps, sum = rowSums(shaps), pred = predict(fit,
newdata = as.matrix(subset(d, fold == 1)[, ivs])))
expect_true(identical(TRUE, all.equal(result$sum, result$pred, tol = 1e-6)))
})
test_that("multiclass feature interactions work", {
dm <- xgb.DMatrix(as.matrix(iris[,-5]), label=as.numeric(iris$Species)-1)
param <- list(eta=0.1, max_depth=4, objective='multi:softprob', num_class=3)
b <- xgb.train(param, dm, 40)
pred = predict(b, dm, outputmargin=TRUE) %>% array(c(3, 150)) %>% t
# SHAP contributions:
cont <- predict(b, dm, predcontrib=TRUE)
expect_length(cont, 3)
# rewrap them as a 3d array
cont <- unlist(cont) %>% array(c(150, 5, 3))
# make sure for each row they add up to marginal predictions
max(abs(apply(cont, c(1,3), sum) - pred)) %>% expect_lt(0.001)
# SHAP interaction contributions:
intr <- predict(b, dm, predinteraction=TRUE)
expect_length(intr, 3)
# rewrap them as a 4d array
intr <- unlist(intr) %>% array(c(150, 5, 5, 3)) %>% aperm(c(4, 1, 2, 3)) # [grp, row, col, col]
# check the symmetry
max(abs(aperm(intr, c(1,2,4,3)) - intr)) %>% expect_lt(0.00001)
# sums WRT columns must be close to feature contributions
max(abs(apply(intr, c(1,2,3), sum) - aperm(cont, c(3,1,2)))) %>% expect_lt(0.00001)
})

View File

@@ -1,6 +1,5 @@
<img src=https://raw.githubusercontent.com/dmlc/dmlc.github.io/master/img/logo-m/xgboost.png width=135/> eXtreme Gradient Boosting
===========
[![Build Status](https://xgboost-ci.net/job/xgboost/job/master/badge/icon?style=plastic)](https://xgboost-ci.net/blue/organizations/jenkins/xgboost/activity)
[![Build Status](https://travis-ci.org/dmlc/xgboost.svg?branch=master)](https://travis-ci.org/dmlc/xgboost)
[![Build Status](https://ci.appveyor.com/api/projects/status/5ypa8vaed6kpmli8?svg=true)](https://ci.appveyor.com/project/tqchen/xgboost)
[![Documentation Status](https://readthedocs.org/projects/xgboost/badge/?version=latest)](https://xgboost.readthedocs.org)

View File

@@ -48,7 +48,7 @@
#include "../src/tree/tree_model.cc"
#include "../src/tree/tree_updater.cc"
#include "../src/tree/updater_colmaker.cc"
#include "../src/tree/updater_quantile_hist.cc"
#include "../src/tree/updater_fast_hist.cc"
#include "../src/tree/updater_prune.cc"
#include "../src/tree/updater_refresh.cc"
#include "../src/tree/updater_sync.cc"

View File

@@ -44,12 +44,12 @@ install:
- set DO_PYTHON=off
- if /i "%target%" == "mingw" set DO_PYTHON=on
- if /i "%target%_%ver%_%configuration%" == "msvc_2015_Release" set DO_PYTHON=on
- if /i "%DO_PYTHON%" == "on" conda install -y numpy scipy pandas matplotlib pytest scikit-learn graphviz python-graphviz
- if /i "%DO_PYTHON%" == "on" conda install -y numpy scipy pandas matplotlib nose scikit-learn graphviz python-graphviz
# R: based on https://github.com/krlmlr/r-appveyor
- ps: |
if($env:target -eq 'rmingw' -or $env:target -eq 'rmsvc') {
#$ErrorActionPreference = "Stop"
Invoke-WebRequest https://raw.githubusercontent.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "$Env:TEMP\appveyor-tool.ps1"
Invoke-WebRequest http://raw.github.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "$Env:TEMP\appveyor-tool.ps1"
Import-Module "$Env:TEMP\appveyor-tool.ps1"
Bootstrap
$DEPS = "c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','knitr','rmarkdown')"
@@ -96,7 +96,7 @@ build_script:
test_script:
- cd %APPVEYOR_BUILD_FOLDER%
- if /i "%DO_PYTHON%" == "on" python -m pytest tests/python
- if /i "%DO_PYTHON%" == "on" python -m nose tests/python
# mingw R package: run the R check (which includes unit tests), and also keep the built binary package
- if /i "%target%" == "rmingw" (
set _R_CHECK_CRAN_INCOMING_=FALSE&&

View File

@@ -1,11 +0,0 @@
/*!
* Copyright 2019 by Contributors
* \file build_config.h
*/
#ifndef XGBOOST_BUILD_CONFIG_H_
#define XGBOOST_BUILD_CONFIG_H_
#cmakedefine XGBOOST_MM_PREFETCH_PRESENT
#cmakedefine XGBOOST_BUILTIN_PREFETCH_PRESENT
#endif // XGBOOST_BUILD_CONFIG_H_

View File

@@ -1,8 +1,8 @@
set(ASan_LIB_NAME ASan)
find_library(ASan_LIBRARY
NAMES libasan.so libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
NAMES libasan.so libasan.so.4
PATHS /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ASan DEFAULT_MSG

View File

@@ -2,7 +2,7 @@ set(LSan_LIB_NAME lsan)
find_library(LSan_LIBRARY
NAMES liblsan.so liblsan.so.0 liblsan.so.0.0.0
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
PATHS /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LSan DEFAULT_MSG

View File

@@ -2,7 +2,7 @@ set(TSan_LIB_NAME tsan)
find_library(TSan_LIBRARY
NAMES libtsan.so libtsan.so.0 libtsan.so.0.0.0
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
PATHS /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(TSan DEFAULT_MSG

View File

@@ -135,7 +135,6 @@ Send a PR to add a one sentence description:)
## Awards
- [John Chambers Award](http://stat-computing.org/awards/jmc/winners.html) - 2016 Winner: XGBoost R Package, by Tong He (Simon Fraser University) and Tianqi Chen (University of Washington)
- [InfoWorlds 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html)
## Windows Binaries
Unofficial windows binaries and instructions on how to use them are hosted on [Guido Tapia's blog](http://www.picnet.com.au/blogs/guido/post/2016/09/22/xgboost-windows-x64-binaries-for-download/)

View File

@@ -62,7 +62,7 @@ test:data = "agaricus.txt.test"
We use the tree booster and logistic regression objective in our setting. This indicates that we accomplish our task using classic gradient boosting regression tree(GBRT), which is a promising method for binary classification.
The parameters shown in the example gives the most common ones that are needed to use xgboost.
If you are interested in more parameter settings, the complete parameter settings and detailed descriptions are [here](../../doc/parameter.rst). Besides putting the parameters in the configuration file, we can set them by passing them as arguments as below:
If you are interested in more parameter settings, the complete parameter settings and detailed descriptions are [here](../../doc/parameter.md). Besides putting the parameters in the configuration file, we can set them by passing them as arguments as below:
```
../../xgboost mushroom.conf max_depth=6

View File

@@ -18,7 +18,7 @@ def loadfmap( fname ):
if it.strip() == '':
continue
k , v = it.split('=')
fmap[ idx ][ v ] = len(nmap)
fmap[ idx ][ v ] = len(nmap) + 1
nmap[ len(nmap) ] = ftype+'='+k
return fmap, nmap

View File

@@ -33,9 +33,9 @@ def logregobj(preds, dtrain):
# Take this in mind when you use the customization, and maybe you need write customized evaluation function
def evalerror(preds, dtrain):
labels = dtrain.get_label()
# return a pair metric_name, result. The metric name must not contain a colon (:) or a space
# return a pair metric_name, result. The metric name must not contain a colon (:)
# since preds are margin(before logistic transformation, cutoff at 0)
return 'my-error', float(sum(labels != (preds > 0.0))) / len(labels)
return 'error', float(sum(labels != (preds > 0.0))) / len(labels)
# training with customized objective, we can also do step by step training
# simply look at xgboost.py's implementation of train

View File

@@ -1,5 +1,5 @@
#!/bin/bash
export PYTHONPATH=$PYTHONPATH:../../python-package
export PYTHONPATH=PYTHONPATH:../../python-package
python basic_walkthrough.py
python custom_objective.py
python boost_from_prediction.py

View File

@@ -1,4 +1,4 @@
Benchmark for Otto Group Competition
Benckmark for Otto Group Competition
=========
This is a folder containing the benchmark for the [Otto Group Competition on Kaggle](http://www.kaggle.com/c/otto-group-product-classification-challenge).
@@ -20,3 +20,5 @@ devtools::install_github('tqchen/xgboost',subdir='R-package')
```
Windows users may need to install [RTools](http://cran.r-project.org/bin/windows/Rtools/) first.

View File

@@ -1,10 +1,10 @@
Demonstrating how to use XGBoost accomplish Multi-Class classification task on [UCI Dermatology dataset](https://archive.ics.uci.edu/ml/datasets/Dermatology)
Make sure you make xgboost python module in ../../python
Make sure you make make xgboost python module in ../../python
1. Run runexp.sh
```bash
./runexp.sh
```
**R version** please see the `train.R`.

View File

@@ -1,64 +0,0 @@
library(data.table)
library(xgboost)
if (!file.exists("./dermatology.data")) {
download.file(
"https://archive.ics.uci.edu/ml/machine-learning-databases/dermatology/dermatology.data",
"dermatology.data",
method = "curl"
)
}
df <- fread("dermatology.data", sep = ",", header = FALSE)
df[, `:=`(V34 = as.integer(ifelse(V34 == "?", 0L, V34)),
V35 = V35 - 1L)]
idx <- sample(nrow(df), size = round(0.7 * nrow(df)), replace = FALSE)
train <- df[idx,]
test <- df[-idx,]
train_x <- train[, 1:34]
train_y <- train[, V35]
test_x <- test[, 1:34]
test_y <- test[, V35]
xg_train <- xgb.DMatrix(data = as.matrix(train_x), label = train_y)
xg_test = xgb.DMatrix(as.matrix(test_x), label = test_y)
params <- list(
objective = 'multi:softmax',
num_class = 6,
max_depth = 6,
nthread = 4,
eta = 0.1
)
watchlist = list(train = xg_train, test = xg_test)
bst <- xgb.train(
params = params,
data = xg_train,
watchlist = watchlist,
nrounds = 5
)
pred <- predict(bst, xg_test)
error_rate <- sum(pred != test_y) / length(test_y)
print(paste("Test error using softmax =", error_rate))
# do the same thing again, but output probabilities
params$objective <- 'multi:softprob'
bst <- xgb.train(params, xg_train, nrounds = 5, watchlist)
pred_prob <- predict(bst, xg_test)
pred_mat <- matrix(pred_prob, ncol = 6, byrow = TRUE)
# validation
# rowSums(pred_mat)
pred_label <- apply(pred_mat, 1, which.max) - 1L
error_rate = sum(pred_label != test_y) / length(test_y)
print(paste("Test error using softprob =", error_rate))

View File

@@ -1,6 +1,6 @@
Learning to rank
====
XGBoost supports accomplishing ranking tasks. In ranking scenario, data are often grouped and we need the [group information file](../../doc/tutorials/input_format.rst#group-input-format) to specify ranking tasks. The model used in XGBoost for ranking is the LambdaRank, this function is not yet completed. Currently, we provide pairwise rank.
XGBoost supports accomplishing ranking tasks. In ranking scenario, data are often grouped and we need the [group information file](../../doc/input_format.md#group-input-format) to specify ranking tasks. The model used in XGBoost for ranking is the LambdaRank, this function is not yet completed. Currently, we provide pairwise rank.
### Parameters
The configuration setting is similar to the regression and binary classification setting, except user need to specify the objectives:
@@ -15,27 +15,14 @@ For more usage details please refer to the [binary classification demo](../binar
Instructions
====
The dataset for ranking demo is from LETOR04 MQ2008 fold1.
Before running the examples, you need to get the data by running:
You can use the following command to run the example:
Get the data:
```
./wgetdata.sh
```
### Command Line
Run the example:
```
./runexp.sh
```
### Python
There are two ways of doing ranking in python.
Run the example using `xgboost.train`:
```
python rank.py
```
Run the example using `XGBRanker`:
```
python rank_sklearn.py
```

View File

@@ -1,41 +0,0 @@
#!/usr/bin/python
import xgboost as xgb
from xgboost import DMatrix
from sklearn.datasets import load_svmlight_file
# This script demonstrate how to do ranking with xgboost.train
x_train, y_train = load_svmlight_file("mq2008.train")
x_valid, y_valid = load_svmlight_file("mq2008.vali")
x_test, y_test = load_svmlight_file("mq2008.test")
group_train = []
with open("mq2008.train.group", "r") as f:
data = f.readlines()
for line in data:
group_train.append(int(line.split("\n")[0]))
group_valid = []
with open("mq2008.vali.group", "r") as f:
data = f.readlines()
for line in data:
group_valid.append(int(line.split("\n")[0]))
group_test = []
with open("mq2008.test.group", "r") as f:
data = f.readlines()
for line in data:
group_test.append(int(line.split("\n")[0]))
train_dmatrix = DMatrix(x_train, y_train)
valid_dmatrix = DMatrix(x_valid, y_valid)
test_dmatrix = DMatrix(x_test)
train_dmatrix.set_group(group_train)
valid_dmatrix.set_group(group_valid)
params = {'objective': 'rank:pairwise', 'eta': 0.1, 'gamma': 1.0,
'min_child_weight': 0.1, 'max_depth': 6}
xgb_model = xgb.train(params, train_dmatrix, num_boost_round=4,
evals=[(valid_dmatrix, 'validation')])
pred = xgb_model.predict(test_dmatrix)

View File

@@ -1,35 +0,0 @@
#!/usr/bin/python
import xgboost as xgb
from sklearn.datasets import load_svmlight_file
# This script demonstrate how to do ranking with XGBRanker
x_train, y_train = load_svmlight_file("mq2008.train")
x_valid, y_valid = load_svmlight_file("mq2008.vali")
x_test, y_test = load_svmlight_file("mq2008.test")
group_train = []
with open("mq2008.train.group", "r") as f:
data = f.readlines()
for line in data:
group_train.append(int(line.split("\n")[0]))
group_valid = []
with open("mq2008.vali.group", "r") as f:
data = f.readlines()
for line in data:
group_valid.append(int(line.split("\n")[0]))
group_test = []
with open("mq2008.test.group", "r") as f:
data = f.readlines()
for line in data:
group_test.append(int(line.split("\n")[0]))
params = {'objective': 'rank:pairwise', 'learning_rate': 0.1,
'gamma': 1.0, 'min_child_weight': 0.1,
'max_depth': 6, 'n_estimators': 4}
model = xgb.sklearn.XGBRanker(**params)
model.fit(x_train, y_train, group_train,
eval_set=[(x_valid, y_valid)], eval_group=[group_valid])
pred = model.predict(x_test)

View File

@@ -1,5 +1,11 @@
#!/bin/bash
python trans_data.py train.txt mq2008.train mq2008.train.group
python trans_data.py test.txt mq2008.test mq2008.test.group
python trans_data.py vali.txt mq2008.vali mq2008.vali.group
../../xgboost mq2008.conf
../../xgboost mq2008.conf task=pred model_in=0004.model

View File

@@ -2,9 +2,3 @@
wget https://s3-us-west-2.amazonaws.com/xgboost-examples/MQ2008.rar
unrar x MQ2008.rar
mv -f MQ2008/Fold1/*.txt .
python trans_data.py train.txt mq2008.train mq2008.train.group
python trans_data.py test.txt mq2008.test mq2008.test.group
python trans_data.py vali.txt mq2008.vali mq2008.vali.group

View File

@@ -176,7 +176,7 @@ In a *sparse* matrix, cells containing `0` are not stored in memory. Therefore,
We will train decision tree model using the following parameters:
* `objective = "binary:logistic"`: we will train a binary classification model ;
* `max.depth = 2`: the trees won't be deep, because our case is very simple ;
* `max.deph = 2`: the trees won't be deep, because our case is very simple ;
* `nthread = 2`: the number of cpu threads we are going to use;
* `nrounds = 2`: there will be two passes on the data, the second one will enhance the model by further reducing the difference between ground truth and prediction.
@@ -576,8 +576,8 @@ print(class(rawVec))
bst3 <- xgb.load(rawVec)
pred3 <- predict(bst3, test$data)
# pred3 should be identical to pred
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3-pred))))
# pred2 should be identical to pred
print(paste("sum(abs(pred3-pred))=", sum(abs(pred2-pred))))
```
```

View File

@@ -90,11 +90,11 @@ Building on OSX
Install with pip: simple method
--------------------------------
First, obtain ``gcc-7`` with Homebrew (https://brew.sh/) to enable multi-threading (i.e. using multiple CPU threads for training). The default Apple Clang compiler does not support OpenMP, so using the default compiler would have disabled multi-threading.
First, make sure you obtained ``gcc-5`` (newer version does not work with this method yet). Note: installation of ``gcc`` can take a while (~ 30 minutes).
.. code-block:: bash
brew install gcc@7
brew install gcc@5
Then install XGBoost with ``pip``:
@@ -102,30 +102,42 @@ Then install XGBoost with ``pip``:
pip3 install xgboost
You might need to run the command with ``--user`` flag if you run into permission errors.
You might need to run the command with ``sudo`` if you run into permission errors.
Build from the source code - advanced method
--------------------------------------------
Obtain ``gcc-7`` from Homebrew:
First, obtain ``gcc-7`` with homebrew (https://brew.sh/) if you want multi-threaded version. Clang is okay if multithreading is not required. Note: installation of ``gcc`` can take a while (~ 30 minutes).
.. code-block:: bash
brew install gcc@7
Now clone the repository:
Now, clone the repository:
.. code-block:: bash
git clone --recursive https://github.com/dmlc/xgboost
cd xgboost; cp make/config.mk ./config.mk
Create the ``build/`` directory and invoke CMake. Make sure to add ``CC=gcc-7 CXX=g++-7`` so that Homebrew GCC is selected. After invoking CMake, you can build XGBoost with ``make``:
Open ``config.mk`` and uncomment these two lines:
.. code-block:: bash
export CC = gcc
export CXX = g++
and replace these two lines as follows: (specify the GCC version)
.. code-block:: bash
export CC = gcc-7
export CXX = g++-7
Now, you may build XGBoost using the following command:
.. code-block:: bash
mkdir build
cd build
CC=gcc-7 CXX=g++-7 cmake ..
make -j4
You may now continue to `Python Package Installation`_.
@@ -161,8 +173,6 @@ To build with MinGW, type:
cp make/mingw64.mk config.mk; make -j4
See :ref:`mingw_python` for building XGBoost for Python.
Compile XGBoost with Microsoft Visual Studio
--------------------------------------------
To build with Visual Studio, we will need CMake. Make sure to install a recent version of CMake. Then run the following from the root of the XGBoost directory:
@@ -194,7 +204,7 @@ From the command line on Linux starting from the XGBoost directory:
mkdir build
cd build
cmake .. -DUSE_CUDA=ON
make -j4
make -j
.. note:: Enabling multi-GPU training
@@ -204,8 +214,8 @@ From the command line on Linux starting from the XGBoost directory:
mkdir build
cd build
cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DNCCL_ROOT=/path/to/nccl2
make -j4
cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON
make -j
On Windows, see what options for generators you have for CMake, and choose one with ``[arch]`` replaced with Win64:
@@ -248,12 +258,10 @@ The configuration file ``config.mk`` modifies several compilation flags:
To customize, first copy ``make/config.mk`` to the project root and then modify the copy.
Alternatively, use CMake.
Python Package Installation
===========================
The Python package is located at ``python-package/``.
The python package is located at ``python-package/``.
There are several ways to install the package:
1. Install system-wide, which requires root permission:
@@ -263,7 +271,7 @@ There are several ways to install the package:
cd python-package; sudo python setup.py install
You will, however, need the Python ``distutils`` module for this to
work. It is often part of the core Python package or it can be installed using your
work. It is often part of the core python package or it can be installed using your
package manager, e.g. in Debian use
.. code-block:: bash
@@ -274,7 +282,7 @@ package manager, e.g. in Debian use
If you recompiled XGBoost, then you need to reinstall it again to make the new library take effect.
2. Only set the environment variable ``PYTHONPATH`` to tell Python where to find
2. Only set the environment variable ``PYTHONPATH`` to tell python where to find
the library. For example, assume we cloned `xgboost` in the home directory
`~`. Then we can add the following line to `~/.bashrc`.
This option is **recommended for developers** who change the code frequently. The changes will be immediately reflected once you pull the code and rebuild the project (no need to call ``setup`` again).
@@ -296,25 +304,6 @@ package manager, e.g. in Debian use
import os
os.environ['PATH'] = os.environ['PATH'] + ';C:\\Program Files\\mingw-w64\\x86_64-5.3.0-posix-seh-rt_v4-rev0\\mingw64\\bin'
.. _mingw_python:
Building XGBoost library for Python for Windows with MinGW-w64
--------------------------------------------------------------
Windows versions of Python are built with Microsoft Visual Studio. Usually Python binary modules are built with the same compiler the interpreter is built with, raising several potential concerns.
1. VS is proprietary and commercial software. Microsoft provides a freeware "Community" edition, but its licensing terms are unsuitable for many organizations.
2. Visual Studio contains telemetry, as documented in `Microsoft Visual Studio Licensing Terms <https://visualstudio.microsoft.com/license-terms/mt736442/>`_. It `has been inserting telemetry <https://old.reddit.com/r/cpp/comments/4ibauu/visual_studio_adding_telemetry_function_calls_to/>`_ into apps for some time. In order to download VS distribution from MS servers one has to run the application containing telemetry. These facts have raised privacy and security concerns among some users and system administrators. Running software with telemetry may be against the policy of your organization.
3. g++ usually generates faster code on ``-O3``.
So you may want to build XGBoost with g++ at your own risk. This opens a can of worms, because MSVC uses the Microsoft runtime and MinGW-w64 uses its own runtime, and the two runtimes have incompatible memory allocators. But in fact this setup is usable if you know how to deal with it. Here is some accumulated experience.
1. The Python interpreter will crash on exit if XGBoost was used. This is usually not a big issue.
2. ``-O3`` is OK.
3. ``-mtune=native`` is also OK.
4. Don't use the ``-march=native`` gcc flag. Using it causes the Python interpreter to crash if the DLL was actually used.
5. You may need to provide the lib with the runtime libs. If ``mingw32/bin`` is not in ``PATH``, build a wheel (``python setup.py bdist_wheel``), open it with an archiver and put the needed DLLs into the directory where ``xgboost.dll`` is situated. Then you can install the wheel with ``pip``.
R Package Installation
======================
@@ -327,13 +316,35 @@ You can install xgboost from CRAN just like any other R package:
install.packages("xgboost")
For OSX users, the single-threaded version will be installed, so only one thread will be used for training. To enable the use of multiple threads (and utilize the capacity of multi-core CPUs), see the section :ref:`osx_multithread` to install XGBoost from source.
Or you can install it from our weekly updated drat repo:
.. code-block:: R
install.packages("drat", repos="https://cran.rstudio.com")
drat:::addRepo("dmlc")
install.packages("xgboost", repos="http://dmlc.ml/drat/", type = "source")
For OSX users, the single-threaded version will be installed. To install the multi-threaded version,
first follow `Building on OSX`_ to get the OpenMP-enabled compiler. Then
- Set the ``Makevars`` file with the highest priority for R.
The point is, there are three ``Makevars`` files: ``~/.R/Makevars``, ``xgboost/R-package/src/Makevars``, and ``/usr/local/Cellar/r/3.2.0/R.framework/Resources/etc/Makeconf`` (the last one obtained by running ``file.path(R.home("etc"), "Makeconf")`` in R), and ``SHLIB_OPENMP_CXXFLAGS`` is not set by default! After some trial and error, it turns out the first one has the highest priority (surprise!).
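A hedged sketch of such a ``~/.R/Makevars`` (the compiler names follow the Homebrew instructions above; the exact flags are an assumption, not an official recipe):

.. code-block:: none

CC = gcc-7
CXX = g++-7
SHLIB_OPENMP_CFLAGS = -fopenmp
SHLIB_OPENMP_CXXFLAGS = -fopenmp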
Then inside R, run
.. code-block:: R
install.packages("drat", repos="https://cran.rstudio.com")
drat:::addRepo("dmlc")
install.packages("xgboost", repos="http://dmlc.ml/drat/", type = "source")
Installing the development version
----------------------------------
Make sure you have installed git and a recent C++ compiler supporting C++11 (e.g., g++-4.8 or higher).
On Windows, Rtools must be installed, and its bin directory has to be added to ``PATH`` during the installation.
On Windows, Rtools must be installed, and its bin directory has to be added to PATH during the installation.
And see the previous subsection for an OSX tip.
Due to the use of git-submodules, ``devtools::install_github`` can no longer be used to install the latest version of the R package.
Thus, one has to run git to check out the code first:
@@ -359,33 +370,6 @@ The package could also be built and installed with cmake (and Visual C++ 2015 on
If all fails, try `Building the shared library`_ to see whether the problem is specific to the R package or not.
.. _osx_multithread:
Installing R package on Mac OSX with multi-threading
----------------------------------------------------
First, obtain ``gcc-7`` with Homebrew (https://brew.sh/) to enable multi-threading (i.e. using multiple CPU threads for training). The default Apple Clang compiler does not support OpenMP, so using the default compiler would have disabled multi-threading.
.. code-block:: bash
brew install gcc@7
Now, clone the repository:
.. code-block:: bash
git clone --recursive https://github.com/dmlc/xgboost
Create the ``build/`` directory and invoke CMake with option ``R_LIB=ON``. Make sure to add ``CC=gcc-7 CXX=g++-7`` so that Homebrew GCC is selected. After invoking CMake, you can install the R package by running ``make`` and ``make install``:
.. code-block:: bash
mkdir build
cd build
CC=gcc-7 CXX=g++-7 cmake .. -DR_LIB=ON
make -j4
make install
Installing R package with GPU support
-------------------------------------
@@ -403,7 +387,7 @@ On Linux, starting from the XGBoost directory type:
When the default target is used, an R package shared library is built in the ``build`` area.
The ``install`` target, in addition, assembles the package files with this shared library under ``build/R-package``, and runs ``R CMD INSTALL``.
On Windows, CMake with Visual C++ Build Tools (or Visual Studio) has to be used to build an R package with GPU support. Rtools must also be installed (perhaps, some other MinGW distributions with ``gendef.exe`` and ``dlltool.exe`` would work, but that was not tested).
On Windows, cmake with Visual C++ Build Tools (or Visual Studio) has to be used to build an R package with GPU support. Rtools must also be installed (perhaps, some other MinGW distributions with ``gendef.exe`` and ``dlltool.exe`` would work, but that was not tested).
.. code-block:: bash

View File

@@ -41,7 +41,7 @@ sys.path.insert(0, curr_path)
# -- mock out modules
import mock
MOCK_MODULES = ['scipy', 'scipy.sparse', 'sklearn', 'pandas']
MOCK_MODULES = ['numpy', 'scipy', 'scipy.sparse', 'sklearn', 'matplotlib', 'pandas', 'graphviz']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
@@ -62,7 +62,6 @@ release = xgboost.__version__
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
@@ -70,11 +69,6 @@ extensions = [
'breathe'
]
graphviz_output_format = 'png'
plot_formats = [('svg', 300), ('png', 100), ('hires.png', 300)]
plot_html_show_source_link = False
plot_html_show_formats = False
# Breathe extension variables
breathe_projects = {"xgboost": "doxyxml/"}
breathe_default_project = "xgboost"
@@ -156,7 +150,7 @@ extensions.append("guzzle_sphinx_theme")
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
# Set the name of the project to appear in the sidebar
"project_nav_name": "XGBoost"
"project_nav_name": "XGBoost (0.80)"
}
html_sidebars = {

View File

@@ -19,7 +19,6 @@ Everyone is more than welcome to contribute. It is a way to make the project bet
* `Documents`_
* `Testcases`_
* `Sanitizers`_
* `clang-tidy`_
* `Examples`_
* `Core Library`_
* `Python Package`_
@@ -150,14 +149,6 @@ sanitizer is not compatible with the other two sanitizers.
cmake -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak" /path/to/xgboost
By default, CMake will search regular system paths for sanitizers; you can also
supply a specific ``SANITIZER_PATH``.
.. code-block:: bash
cmake -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak" \
-DSANITIZER_PATH=/path/to/sanitizers /path/to/xgboost
How to use sanitizers with CUDA support
=======================================
Running XGBoost on CUDA with the address sanitizer (ASan) will raise memory errors.
@@ -170,31 +161,6 @@ environment variable:
For details, please consult `official documentation <https://github.com/google/sanitizers/wiki>`_ for sanitizers.
**********
clang-tidy
**********
To run clang-tidy on both C++ and CUDA source code, run the following command
from the top level source tree:
.. code-block:: bash
cd /path/to/xgboost/
python3 tests/ci_build/tidy.py --gtest-path=/path/to/google-test
The script requires the full path of Google Test library via the ``--gtest-path`` argument.
Also, the script accepts two optional integer arguments, namely ``--cpp`` and ``--cuda``.
By default they are both set to 1. If you want to exclude CUDA source from
linting, use:
.. code-block:: bash
cd /path/to/xgboost/
python3 tests/ci_build/tidy.py --cuda=0
Similarly, if you want to exclude C++ source from linting:
.. code-block:: bash
cd /path/to/xgboost/
python3 tests/ci_build/tidy.py --cpp=0
********
Examples

View File

@@ -18,7 +18,7 @@ Tree construction (training) and prediction can be accelerated with CUDA-capable
Usage
=====
Specify the ``tree_method`` parameter as one of the following algorithms.
Specify the ``tree_method`` parameter as one of the following algorithms.
Algorithms
----------
@@ -31,43 +31,39 @@ Algorithms
| gpu_hist | Equivalent to the XGBoost fast histogram algorithm. Much faster and uses considerably less memory. NOTE: Will run very slowly on GPUs older than Pascal architecture. |
+--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
Supported parameters
Supported parameters
--------------------
.. |tick| unicode:: U+2714
.. |cross| unicode:: U+2718
.. |tick| unicode:: U+2714
.. |cross| unicode:: U+2718
+--------------------------------+---------------+--------------+
| parameter | ``gpu_exact`` | ``gpu_hist`` |
+================================+===============+==============+
| ``subsample`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``colsample_bytree`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``colsample_bylevel`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``max_bin`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``gpu_id`` | |tick| | |tick| |
+--------------------------------+---------------+--------------+
| ``n_gpus`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``predictor`` | |tick| | |tick| |
+--------------------------------+---------------+--------------+
| ``grow_policy`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``monotone_constraints`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``single_precision_histogram`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
+--------------------------+---------------+--------------+
| parameter | ``gpu_exact`` | ``gpu_hist`` |
+==========================+===============+==============+
| ``subsample`` | |cross| | |tick| |
+--------------------------+---------------+--------------+
| ``colsample_bytree`` | |cross| | |tick| |
+--------------------------+---------------+--------------+
| ``colsample_bylevel`` | |cross| | |tick| |
+--------------------------+---------------+--------------+
| ``max_bin`` | |cross| | |tick| |
+--------------------------+---------------+--------------+
| ``gpu_id`` | |tick| | |tick| |
+--------------------------+---------------+--------------+
| ``n_gpus`` | |cross| | |tick| |
+--------------------------+---------------+--------------+
| ``predictor`` | |tick| | |tick| |
+--------------------------+---------------+--------------+
| ``grow_policy`` | |cross| | |tick| |
+--------------------------+---------------+--------------+
| ``monotone_constraints`` | |cross| | |tick| |
+--------------------------+---------------+--------------+
GPU accelerated prediction is enabled by default for the above mentioned ``tree_method`` parameters but can be switched to CPU prediction by setting ``predictor`` to ``cpu_predictor``. This could be useful if you want to conserve GPU memory. Likewise when using CPU algorithms, GPU accelerated prediction can be enabled by setting ``predictor`` to ``gpu_predictor``.
The experimental parameter ``single_precision_histogram`` can be set to True to enable building histograms using single precision. This may improve speed, in particular on older architectures.
The device ordinal can be selected using the ``gpu_id`` parameter, which defaults to 0.
Multiple GPUs can be used with the ``gpu_hist`` tree method using the ``n_gpus`` parameter, which defaults to 1. If this is set to -1, all available GPUs will be used. If ``gpu_id`` is specified as non-zero, the selected GPU devices will be from ``gpu_id`` to ``gpu_id+n_gpus``; note that ``gpu_id+n_gpus`` must be less than or equal to the number of available GPUs on your system. As with GPU vs. CPU, multi-GPU will not always be faster than a single GPU due to PCI bus bandwidth that can limit performance.
Multiple GPUs can be used with the ``gpu_hist`` tree method using the ``n_gpus`` parameter, which defaults to 1. If this is set to -1, all available GPUs will be used. If ``gpu_id`` is specified as non-zero, the GPU device order is ``(gpu_id + i) % n_visible_devices`` for ``i = 0`` to ``n_gpus - 1``. As with GPU vs. CPU, multi-GPU will not always be faster than a single GPU due to PCI bus bandwidth that can limit performance.
.. note:: Enabling multi-GPU training
@@ -82,95 +78,6 @@ The GPU algorithms currently work with CLI, Python and R packages. See :doc:`/bu
param['max_bin'] = 16
param['tree_method'] = 'gpu_hist'
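As a minimal end-to-end sketch (synthetic data; only ``tree_method``, ``max_bin``, and ``predictor`` come from the documentation above, and a CUDA-enabled build is assumed), prediction can be kept on the CPU while training runs on the GPU:

.. code-block:: python

import numpy as np
import xgboost as xgb

X = np.random.rand(1000, 10)          # synthetic data for illustration
y = np.random.randint(2, size=1000)
dtrain = xgb.DMatrix(X, label=y)

params = {'objective': 'binary:logistic',
          'max_bin': 16,
          'tree_method': 'gpu_hist',     # train on the GPU
          'predictor': 'cpu_predictor'}  # predict on the CPU to conserve GPU memory
bst = xgb.train(params, dtrain, num_boost_round=10)
preds = bst.predict(dtrain)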
Objective functions
===================
Most of the objective functions implemented in XGBoost can be run on the GPU. The following table shows the current support status.
.. |tick| unicode:: U+2714
.. |cross| unicode:: U+2718
+-----------------+-------------+
| Objectives | GPU support |
+-----------------+-------------+
| reg:linear | |tick| |
+-----------------+-------------+
| reg:logistic | |tick| |
+-----------------+-------------+
| binary:logistic | |tick| |
+-----------------+-------------+
| binary:logitraw | |tick| |
+-----------------+-------------+
| binary:hinge | |tick| |
+-----------------+-------------+
| count:poisson | |tick| |
+-----------------+-------------+
| reg:gamma | |tick| |
+-----------------+-------------+
| reg:tweedie | |tick| |
+-----------------+-------------+
| multi:softmax | |tick| |
+-----------------+-------------+
| multi:softprob | |tick| |
+-----------------+-------------+
| survival:cox | |cross| |
+-----------------+-------------+
| rank:pairwise | |cross| |
+-----------------+-------------+
| rank:ndcg | |cross| |
+-----------------+-------------+
| rank:map | |cross| |
+-----------------+-------------+
For multi-GPU support, objective functions also honor the ``n_gpus`` parameter,
which is set to 1 by default. To disable running objectives on the GPU, just set
``n_gpus`` to 0.
Metric functions
===================
The following table shows the current support status for evaluation metrics on the GPU.
.. |tick| unicode:: U+2714
.. |cross| unicode:: U+2718
+-----------------+-------------+
| Metric | GPU Support |
+=================+=============+
| rmse | |tick| |
+-----------------+-------------+
| mae | |tick| |
+-----------------+-------------+
| logloss | |tick| |
+-----------------+-------------+
| error | |tick| |
+-----------------+-------------+
| merror | |cross| |
+-----------------+-------------+
| mlogloss | |cross| |
+-----------------+-------------+
| auc | |cross| |
+-----------------+-------------+
| aucpr | |cross| |
+-----------------+-------------+
| ndcg | |cross| |
+-----------------+-------------+
| map | |cross| |
+-----------------+-------------+
| poisson-nloglik | |tick| |
+-----------------+-------------+
| gamma-nloglik | |tick| |
+-----------------+-------------+
| cox-nloglik | |cross| |
+-----------------+-------------+
| gamma-deviance | |tick| |
+-----------------+-------------+
| tweedie-nloglik | |tick| |
+-----------------+-------------+
As with objective functions, metrics honor the ``n_gpus`` parameter,
which is set to 1 by default. To disable running metrics on the GPU, just set
``n_gpus`` to 0.
Benchmarks
==========
You can run benchmarks on synthetic data for binary classification:
@@ -202,16 +109,13 @@ References
`Nvidia Parallel Forall: Gradient Boosting, Decision Trees and XGBoost with CUDA <https://devblogs.nvidia.com/parallelforall/gradient-boosting-decision-trees-xgboost-cuda/>`_
Contributors
Authors
=======
Many thanks to the following contributors (alphabetical order):
* Andrey Adinets
* Jiaming Yuan
* Jonathan C. McKinney
* Matthew Jones
* Philip Cho
* Rory Mitchell
* Jonathan C. McKinney
* Shankara Rao Thejaswi Nanditale
* Vinay Deshpande
* ... and the rest of the H2O.ai and NVIDIA team.
Please report bugs to the user forum https://discuss.xgboost.ai/.

View File

@@ -58,11 +58,9 @@ For sbt, please add the repository and dependency in build.sbt as following:
If you want to use XGBoost4J-Spark, replace ``xgboost4j`` with ``xgboost4j-spark``.
.. note:: XGBoost4J-Spark requires Apache Spark 2.3+
.. note:: XGBoost4J-Spark requires Spark 2.3+
XGBoost4J-Spark now requires **Apache Spark 2.3+**. The latest versions of XGBoost4J-Spark use facilities of `org.apache.spark.ml.param.shared` extensively to provide a tight integration with the Spark MLLIB framework, and these facilities are not fully available on earlier versions of Spark.
Also, make sure to install Spark directly from `Apache website <https://spark.apache.org/>`_. **Upstream XGBoost is not guaranteed to work with third-party distributions of Spark, such as Cloudera Spark.** Consult appropriate third parties to obtain their distribution of XGBoost.
XGBoost4J-Spark now requires Spark 2.3+. The latest versions of XGBoost4J-Spark use facilities of `org.apache.spark.ml.param.shared` extensively to provide a tight integration with the Spark MLLIB framework, and these facilities are not fully available on earlier versions of Spark.
Installation from maven repo
============================

View File

@@ -57,21 +57,13 @@ and then refer to the snapshot dependency by adding:
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-spark</artifactId>
<artifactId>xgboost4j</artifactId>
<version>next_version_num-SNAPSHOT</version>
</dependency>
.. note:: XGBoost4J-Spark requires Apache Spark 2.3+
.. note:: XGBoost4J-Spark requires Spark 2.3+
XGBoost4J-Spark now requires **Apache Spark 2.3+**. The latest versions of XGBoost4J-Spark use facilities of `org.apache.spark.ml.param.shared` extensively to provide a tight integration with the Spark MLLIB framework, and these facilities are not fully available on earlier versions of Spark.
Also, make sure to install Spark directly from `Apache website <https://spark.apache.org/>`_. **Upstream XGBoost is not guaranteed to work with third-party distributions of Spark, such as Cloudera Spark.** Consult appropriate third parties to obtain their distribution of XGBoost.
Installation from maven repo
.. note:: Use of Python in XGBoost4J-Spark
By default, we use the tracker in `dmlc-core <https://github.com/dmlc/dmlc-core/tree/master/tracker>`_ to drive the training with XGBoost4J-Spark. It requires Python 2.7+. We also have an experimental Scala version of tracker which can be enabled by passing the parameter ``tracker_conf`` as ``scala``.
XGBoost4J-Spark now requires Spark 2.3+. The latest versions of XGBoost4J-Spark use facilities of `org.apache.spark.ml.param.shared` extensively to provide a tight integration with the Spark MLLIB framework, and these facilities are not fully available on earlier versions of Spark.
Data Preparation
================
@@ -191,20 +183,6 @@ After we set XGBoostClassifier parameters and feature/label column, we can build
val xgbClassificationModel = xgbClassifier.fit(xgbInput)
Early Stopping
----------------
Early stopping is a feature to prevent unnecessary training iterations. By specifying ``num_early_stopping_rounds``, or by directly calling ``setNumEarlyStoppingRounds`` on an XGBoostClassifier or XGBoostRegressor, we define how many rounds the evaluation metric may move away from the best iteration before training is stopped early.
In addition to ``num_early_stopping_rounds``, you also need to define ``maximize_evaluation_metrics`` or call ``setMaximizeEvaluationMetrics`` to specify whether you want to maximize or minimize the metric during training.
For example, suppose we maximize the evaluation metric (set ``maximize_evaluation_metrics`` to true), set ``num_early_stopping_rounds`` to 5, and the evaluation metric of the 10th iteration is the best so far. If no evaluation metric in the following iterations exceeds the 10th iteration's (the best one), training would be stopped early at the 15th iteration.
Training with Evaluation Sets
----------------
You can also monitor the performance of the model during training with multiple evaluation datasets. By specifying ``eval_sets`` or calling ``setEvalSets`` on an XGBoostClassifier or XGBoostRegressor, you can pass in multiple evaluation datasets as a Map from String to DataFrame.
Prediction
==========

View File

@@ -23,25 +23,14 @@ General Parameters
- Which booster to use. Can be ``gbtree``, ``gblinear`` or ``dart``; ``gbtree`` and ``dart`` use tree based models while ``gblinear`` uses linear functions.
* ``silent`` [default=0] [Deprecated]
* ``silent`` [default=0]
- Deprecated. Please use ``verbosity`` instead.
* ``verbosity`` [default=1]
- Verbosity of printing messages. Valid values are 0 (silent),
1 (warning), 2 (info), 3 (debug). Sometimes XGBoost tries to change
configurations based on heuristics, which is displayed as a warning message.
If there's unexpected behaviour, please try increasing the value of verbosity.
- 0 means printing running messages, 1 means silent mode
* ``nthread`` [default to maximum number of threads available if not set]
- Number of parallel threads used to run XGBoost
* ``disable_default_eval_metric`` [default=0]
- Flag to disable default metric. Set to >0 to disable.
* ``num_pbuffer`` [set automatically by XGBoost, no need to be set by user]
- Size of prediction buffer, normally set to number of training instances. The buffers are used to save the prediction results of last boosting step.
@@ -64,8 +53,8 @@ Parameters for Tree Booster
* ``max_depth`` [default=6]
- Maximum depth of a tree. Increasing this value will make the model more complex and more likely to overfit. 0 is only accepted in ``lossguided`` growing policy when tree_method is set as ``hist`` and it indicates no limit on depth. Beware that XGBoost aggressively consumes memory when training a deep tree.
- range: [0,∞] (0 is only accepted in ``lossguided`` growing policy when tree_method is set as ``hist``)
- Maximum depth of a tree. Increasing this value will make the model more complex and more likely to overfit. 0 indicates no limit. Note that a limit is required when ``grow_policy`` is set to ``depthwise``.
- range: [0,∞]
* ``min_child_weight`` [default=1]
@@ -82,22 +71,15 @@ Parameters for Tree Booster
- Subsample ratio of the training instances. Setting it to 0.5 means that XGBoost would randomly sample half of the training data prior to growing trees, and this will prevent overfitting. Subsampling will occur once in every boosting iteration.
- range: (0,1]
* ``colsample_bytree``, ``colsample_bylevel``, ``colsample_bynode`` [default=1]
- This is a family of parameters for subsampling of columns.
- All ``colsample_by*`` parameters have a range of (0, 1], the default value of 1, and
specify the fraction of columns to be subsampled.
- ``colsample_bytree`` is the subsample ratio of columns when constructing each
tree. Subsampling occurs once for every tree constructed.
- ``colsample_bylevel`` is the subsample ratio of columns for each level. Subsampling
occurs once for every new depth level reached in a tree. Columns are subsampled from
the set of columns chosen for the current tree.
- ``colsample_bynode`` is the subsample ratio of columns for each node
(split). Subsampling occurs once every time a new split is evaluated. Columns are
subsampled from the set of columns chosen for the current level.
- ``colsample_by*`` parameters work cumulatively. For instance,
the combination ``{'colsample_bytree':0.5, 'colsample_bylevel':0.5,
'colsample_bynode':0.5}`` with 64 features will leave 8 features (64 × 0.5³ = 8) to choose from at
each split.
* ``colsample_bytree`` [default=1]
- Subsample ratio of columns when constructing each tree. Subsampling will occur once in every boosting iteration.
- range: (0,1]
* ``colsample_bylevel`` [default=1]
- Subsample ratio of columns for each split, in each level. Subsampling will occur each time a new split is made. This parameter has no effect when ``tree_method`` is set to ``hist``.
- range: (0,1]
* ``lambda`` [default=1, alias: ``reg_lambda``]
@@ -110,7 +92,7 @@ Parameters for Tree Booster
* ``tree_method`` string [default= ``auto``]
- The tree construction algorithm used in XGBoost. See description in the `reference paper <http://arxiv.org/abs/1603.02754>`_.
- XGBoost supports ``hist`` and ``approx`` for distributed training and only supports ``approx`` for the external memory version.
- Distributed and external memory version only support ``tree_method=approx``.
- Choices: ``auto``, ``exact``, ``approx``, ``hist``, ``gpu_exact``, ``gpu_hist``
- ``auto``: Use heuristic to choose the fastest method.
@@ -152,7 +134,7 @@ Parameters for Tree Booster
- ``refresh``: refreshes tree's statistics and/or leaf values based on the current data. Note that no random subsampling of data rows is performed.
- ``prune``: prunes the splits where loss < min_split_loss (or gamma).
- In a distributed setting, the implicit updater sequence value would be adjusted to ``grow_histmaker,prune`` by default, and you can set ``tree_method`` as ``hist`` to use ``grow_histmaker``.
- In a distributed setting, the implicit updater sequence value would be adjusted to ``grow_histmaker,prune``.
* ``refresh_leaf`` [default=1]
@@ -170,7 +152,7 @@ Parameters for Tree Booster
- Controls the way new nodes are added to the tree.
- Currently supported only if ``tree_method`` is set to ``hist``.
- Choices: ``depthwise``, ``lossguide``
- Choices: ``depthwise``, ```lossguide``
- ``depthwise``: split at nodes closest to the root.
- ``lossguide``: split at nodes with highest loss change.
@@ -192,10 +174,6 @@ Parameters for Tree Booster
- ``cpu_predictor``: Multicore CPU prediction algorithm.
- ``gpu_predictor``: Prediction using GPU. Default when ``tree_method`` is ``gpu_exact`` or ``gpu_hist``.
* ``num_parallel_tree`` [default=1]
- Number of parallel trees constructed during each iteration. This
option is used to support boosted random forests.
Additional parameters for Dart Booster (``booster=dart``)
=========================================================
@@ -263,22 +241,8 @@ Parameters for Linear Booster (``booster=gblinear``)
- Choice of algorithm to fit linear model
- ``shotgun``: Parallel coordinate descent algorithm based on shotgun algorithm. Uses 'hogwild' parallelism and therefore produces a nondeterministic solution on each run.
- ``coord_descent``: Ordinary coordinate descent algorithm. Also multithreaded but still produces a deterministic solution.
* ``feature_selector`` [default= ``cyclic``]
- Feature selection and ordering method
* ``cyclic``: Deterministic selection by cycling through features one at a time.
* ``shuffle``: Similar to ``cyclic`` but with random feature shuffling prior to each update.
* ``random``: A random (with replacement) coordinate selector.
* ``greedy``: Select coordinate with the greatest gradient magnitude. It has ``O(num_feature^2)`` complexity. It is fully deterministic. It allows restricting the selection to ``top_k`` features per group with the largest magnitude of univariate weight change, by setting the ``top_k`` parameter. Doing so would reduce the complexity to ``O(num_feature*top_k)``.
* ``thrifty``: Thrifty, approximately-greedy feature selector. Prior to cyclic updates, reorders features in descending magnitude of their univariate weight changes. This operation is multithreaded and is a linear complexity approximation of the quadratic greedy selection. It allows restricting the selection to ``top_k`` features per group with the largest magnitude of univariate weight change, by setting the ``top_k`` parameter.
* ``top_k`` [default=0]
- The number of top features to select in ``greedy`` and ``thrifty`` feature selector. The value of 0 means using all the features.
- ``shotgun``: Parallel coordinate descent algorithm based on shotgun algorithm. Uses 'hogwild' parallelism and therefore produces a nondeterministic solution on each run.
- ``coord_descent``: Ordinary coordinate descent algorithm. Also multithreaded but still produces a deterministic solution.
Parameters for Tweedie Regression (``objective=reg:tweedie``)
=============================================================
@@ -301,6 +265,9 @@ Specify the learning task and the corresponding learning objective. The objectiv
- ``binary:logistic``: logistic regression for binary classification, output probability
- ``binary:logitraw``: logistic regression for binary classification, output score before logistic transformation
- ``binary:hinge``: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
- ``gpu:reg:linear``, ``gpu:reg:logistic``, ``gpu:binary:logistic``, ``gpu:binary:logitraw``: versions
of the corresponding objective functions evaluated on the GPU; note that like the GPU histogram algorithm,
they can only be used when the entire training session uses the same dataset
- ``count:poisson``: Poisson regression for count data, output mean of Poisson distribution
- ``max_delta_step`` is set to 0.7 by default in poisson regression (used to safeguard optimization)
@@ -309,9 +276,7 @@ Specify the learning task and the corresponding learning objective. The objectiv
Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function ``h(t) = h0(t) * HR``).
- ``multi:softmax``: set XGBoost to do multiclass classification using the softmax objective; you also need to set ``num_class`` (number of classes)
- ``multi:softprob``: same as softmax, but output a vector of ``ndata * nclass``, which can be further reshaped to ``ndata * nclass`` matrix. The result contains predicted probability of each data point belonging to each class.
- ``rank:pairwise``: Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized
- ``rank:ndcg``: Use LambdaMART to perform list-wise ranking where `Normalized Discounted Cumulative Gain (NDCG) <http://en.wikipedia.org/wiki/NDCG>`_ is maximized
- ``rank:map``: Use LambdaMART to perform list-wise ranking where `Mean Average Precision (MAP) <http://en.wikipedia.org/wiki/Mean_average_precision#Mean_average_precision>`_ is maximized
- ``rank:pairwise``: set XGBoost to do ranking task by minimizing the pairwise loss
- ``reg:gamma``: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be `gamma-distributed <https://en.wikipedia.org/wiki/Gamma_distribution#Applications>`_.
- ``reg:tweedie``: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be `Tweedie-distributed <https://en.wikipedia.org/wiki/Tweedie_distribution#Applications>`_.
@@ -334,9 +299,8 @@ Specify the learning task and the corresponding learning objective. The objectiv
- ``merror``: Multiclass classification error rate. It is calculated as ``#(wrong cases)/#(all cases)``.
- ``mlogloss``: `Multiclass logloss <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html>`_.
- ``auc``: `Area under the curve <http://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve>`_
- ``aucpr``: `Area under the PR curve <https://en.wikipedia.org/wiki/Precision_and_recall>`_
- ``ndcg``: `Normalized Discounted Cumulative Gain <http://en.wikipedia.org/wiki/NDCG>`_
- ``map``: `Mean Average Precision <http://en.wikipedia.org/wiki/Mean_average_precision#Mean_average_precision>`_
- ``map``: `Mean average precision <http://en.wikipedia.org/wiki/Mean_average_precision#Mean_average_precision>`_
- ``ndcg@n``, ``map@n``: 'n' can be assigned as an integer to cut off the top positions in the lists for evaluation.
- ``ndcg-``, ``map-``, ``ndcg@n-``, ``map@n-``: In XGBoost, NDCG and MAP will evaluate the score of a list without any positive samples as 1. By adding "-" in the evaluation metric XGBoost will evaluate these score as 0 to be consistent under some conditions.
- ``poisson-nloglik``: negative log-likelihood for Poisson regression

View File

@@ -39,10 +39,6 @@ Scikit-Learn API
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: xgboost.XGBRanker
:members:
:inherited-members:
:show-inheritance:
Plotting API
------------
@@ -53,15 +49,3 @@ Plotting API
.. autofunction:: xgboost.plot_tree
.. autofunction:: xgboost.to_graphviz
.. _callback_api:
Callback API
------------
.. autofunction:: xgboost.callback.print_evaluation
.. autofunction:: xgboost.callback.record_evaluation
.. autofunction:: xgboost.callback.reset_learning_rate
.. autofunction:: xgboost.callback.early_stop

View File

@@ -48,15 +48,9 @@ The data is stored in a :py:class:`DMatrix <xgboost.DMatrix>` object.
dtrain = xgb.DMatrix('train.csv?format=csv&label_column=0')
dtest = xgb.DMatrix('test.csv?format=csv&label_column=0')
.. note:: Categorical features not supported
Note that XGBoost does not support categorical features; if your data contains
categorical features, load it as a NumPy array first and then perform
`one-hot encoding <http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html>`_.
.. note:: Use Pandas to load CSV files with headers
Currently, the DMLC data parser cannot parse CSV files with headers. Use Pandas (see below) to read CSV files with headers.
(Note that XGBoost does not support categorical features; if your data contains
categorical features, load it as a NumPy array first and then perform
`one-hot encoding <http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html>`_.)
* To load a NumPy array into :py:class:`DMatrix <xgboost.DMatrix>`:
@@ -101,10 +95,6 @@ The data is stored in a :py:class:`DMatrix <xgboost.DMatrix>` object.
w = np.random.rand(5, 1)
dtrain = xgb.DMatrix(data, label=label, missing=-999.0, weight=w)
When performing ranking tasks, the number of weights should be equal
to the number of groups.
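A hedged sketch of that ranking case (group sizes and weights are made up for illustration):

.. code-block:: python

import numpy as np
import xgboost as xgb

data = np.random.rand(6, 3)               # six rows of synthetic ranking data
dtrain = xgb.DMatrix(data, label=np.zeros(6))
dtrain.set_group([4, 2])                  # two query groups: 4 + 2 = 6 rows
dtrain.set_weight(np.array([0.5, 1.0]))   # one weight per group, not per row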
Setting Parameters
------------------
XGBoost can use either a list of pairs or a dictionary to set :doc:`parameters </parameter>`. For instance:
@@ -165,10 +155,6 @@ A saved model can be loaded as follows:
bst = xgb.Booster({'nthread': 4}) # init model
bst.load_model('model.bin') # load data
Methods including `update` and `boost` from `xgboost.Booster` are designed for
internal usage only. The wrapper function `xgboost.train` does some
pre-configuration including setting up caches and some other parameters.
Early Stopping
--------------
If you have a validation set, you can use early stopping to find the optimal number of boosting rounds.
@@ -223,3 +209,4 @@ When you use ``IPython``, you can use the :py:meth:`xgboost.to_graphviz` functio
.. code-block:: python
xgb.to_graphviz(bst, num_trees=2)

View File

@@ -3,6 +3,3 @@ mock
guzzle_sphinx_theme
breathe
sh>=1.12.14
matplotlib>=2.1
graphviz
numpy

View File

@@ -1,8 +1,216 @@
###############################
Distributed XGBoost YARN on AWS
###############################
[This page is under construction.]
This is a step-by-step tutorial on how to set up and run distributed `XGBoost <https://github.com/dmlc/xgboost>`_
on an AWS EC2 cluster. Distributed XGBoost runs on various platforms such as MPI, SGE and Hadoop YARN.
In this tutorial, we use YARN as an example since this is a widely used solution for distributed computing.
.. note:: XGBoost with Spark
If you are preprocessing training data with Spark, consider using :doc:`XGBoost4J-Spark </jvm/xgboost4j_spark_tutorial>`.
************
Prerequisite
************
We need to get an `AWS key-pair <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html>`_
to access the AWS services. Let us assume that we are using a key ``mykey`` and the corresponding permission file ``mypem.pem``.
We also need `AWS credentials <https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html>`_,
which includes an ``ACCESS_KEY_ID`` and a ``SECRET_ACCESS_KEY``.
Finally, we will need an S3 bucket to host the data and the model, e.g. ``s3://mybucket/``.
***************************
Setup a Hadoop YARN Cluster
***************************
This section shows how to start a Hadoop YARN cluster from scratch.
You can skip this step if you already have one.
We will be using `yarn-ec2 <https://github.com/tqchen/yarn-ec2>`_ to start the cluster.
First, clone the yarn-ec2 script with the following command.
.. code-block:: bash
git clone https://github.com/tqchen/yarn-ec2
To use the script, we must set the environment variables ``AWS_ACCESS_KEY_ID`` and
``AWS_SECRET_ACCESS_KEY`` properly. This can be done by adding the following two lines in
``~/.bashrc`` (replacing the strings with the correct ones)
.. code-block:: bash
export AWS_ACCESS_KEY_ID=[your access ID]
export AWS_SECRET_ACCESS_KEY=[your secret access key]
Now we can launch a master machine of the cluster from EC2:
.. code-block:: bash
./yarn-ec2 -k mykey -i mypem.pem launch xgboost
Wait a few minutes until the master machine is up.
Once it is, we can query its public DNS using the following command.
.. code-block:: bash
./yarn-ec2 -k mykey -i mypem.pem get-master xgboost
It will show the public DNS of the master machine, like ``ec2-xx-xx-xx.us-west-2.compute.amazonaws.com``.
Now we can open the browser and type (replacing the DNS with the master DNS):
.. code-block:: none
ec2-xx-xx-xx.us-west-2.compute.amazonaws.com:8088
This will show the job tracker of the YARN cluster. Note that we may have to wait a few minutes before the master finishes bootstrapping and starts the
job tracker.
Once the master machine is up, we can freely add more slave machines to the cluster.
The following command adds two m3.xlarge instances to the cluster.
.. code-block:: bash
./yarn-ec2 -k mykey -i mypem.pem -t m3.xlarge -s 2 addslave xgboost
We can also choose to add two spot instances instead:
.. code-block:: bash
./yarn-ec2 -k mykey -i mypem.pem -t m3.xlarge -s 2 addspot xgboost
The slave machines will start up, bootstrap, and report to the master.
You can check whether the slave machines are connected by clicking the Nodes link on the job tracker,
or simply type the following URL (replacing the DNS with the master DNS):
.. code-block:: none
ec2-xx-xx-xx.us-west-2.compute.amazonaws.com:8088/cluster/nodes
Note that not all the links on the job tracker work.
This is because many of them use the private IPs of AWS, which can only be accessed from within EC2.
We can use an SSH proxy to access these pages.
Now that we have set up a cluster with one master and two slaves, we are ready to run the experiment.
*********************
Build XGBoost with S3
*********************
We can log into the master machine with the following command.
.. code-block:: bash
./yarn-ec2 -k mykey -i mypem.pem login xgboost
We will be using S3 to host the data and the resulting model, so the data won't be lost after the cluster shuts down.
To do so, we need to build XGBoost with S3 support. The only thing required is to set the ``USE_S3``
variable to true. This can be achieved by the following commands.
.. code-block:: bash
git clone --recursive https://github.com/dmlc/xgboost
cd xgboost
cp make/config.mk config.mk
echo "USE_S3=1" >> config.mk
make -j4
Now we have built XGBoost with S3 support. You can also enable HDFS support, if you plan to store data on HDFS, by turning on the ``USE_HDFS`` option.
XGBoost also relies on environment variables to access S3, so you will need to add the following two lines to ``~/.bashrc`` (replacing the strings with the correct ones)
on the master machine as well.
.. code-block:: bash
export AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE
export AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
export BUCKET=mybucket
*******************
Host the Data on S3
*******************
In this example, we will copy the example dataset shipped with XGBoost to the S3 bucket as input.
In normal use cases, the dataset is usually produced by an existing distributed processing pipeline.
We can use `s3cmd <http://s3tools.org/s3cmd>`_ to copy the data into mybucket (replace ``${BUCKET}`` with the real bucket name):
.. code-block:: bash
cd xgboost
s3cmd put demo/data/agaricus.txt.train s3://${BUCKET}/xgb-demo/train/
s3cmd put demo/data/agaricus.txt.test s3://${BUCKET}/xgb-demo/test/
***************
Submit the Jobs
***************
Now that everything is ready, we can submit the XGBoost distributed job to the YARN cluster.
We will use the `dmlc-submit <https://github.com/dmlc/dmlc-core/tree/master/tracker>`_ script to submit the job.
Run the following script in the distributed training folder (replace ``${BUCKET}`` with the real bucket name):
.. code-block:: bash
cd xgboost/demo/distributed-training
# Use dmlc-submit to submit the job.
../../dmlc-core/tracker/dmlc-submit --cluster=yarn --num-workers=2 --worker-cores=2\
../../xgboost mushroom.aws.conf nthread=2\
data=s3://${BUCKET}/xgb-demo/train\
eval[test]=s3://${BUCKET}/xgb-demo/test\
model_dir=s3://${BUCKET}/xgb-demo/model
All the configurations, such as ``data`` and ``model_dir``, can also be written directly into the configuration file.
Note that we specified only the folder paths, not individual file names.
XGBoost will read in all the files in each folder as the training and evaluation data.
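For instance, a hedged sketch of such a configuration file (the entries mirror the command-line arguments above; the exact contents are an assumption):

.. code-block:: none

# entries use the same key = value form as the command-line arguments
data = s3://mybucket/xgb-demo/train
eval[test] = s3://mybucket/xgb-demo/test
model_dir = s3://mybucket/xgb-demo/model
nthread = 2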
In the ``dmlc-submit`` command above, we are using two workers, and each worker uses two threads.
XGBoost can benefit from using multiple cores in each worker.
A common choice is 4 to 8 cores per worker.
The trained model will be saved into the specified model folder. You can browse the model folder:
.. code-block:: bash
s3cmd ls s3://${BUCKET}/xgb-demo/model/
The following is an example output from distributed training.
.. code-block:: none
16/02/26 05:41:59 INFO dmlc.Client: jobname=DMLC[nworker=2]:xgboost,username=ubuntu
16/02/26 05:41:59 INFO dmlc.Client: Submitting application application_1456461717456_0015
16/02/26 05:41:59 INFO impl.YarnClientImpl: Submitted application application_1456461717456_0015
2016-02-26 05:42:05,230 INFO @tracker All of 2 nodes getting started
2016-02-26 05:42:14,027 INFO [05:42:14] [0] test-error:0.016139 train-error:0.014433
2016-02-26 05:42:14,186 INFO [05:42:14] [1] test-error:0.000000 train-error:0.001228
2016-02-26 05:42:14,947 INFO @tracker All nodes finishes job
2016-02-26 05:42:14,948 INFO @tracker 9.71754479408 secs between node start and job finish
Application application_1456461717456_0015 finished with state FINISHED at 1456465335961
*****************
Analyze the Model
*****************
After the model is trained, we can analyse the learnt model and use it for future prediction tasks.
XGBoost is a portable framework, meaning models are *exchangeable* across all platforms.
This means we can load the trained model in Python/R/Julia and take advantage of data science pipelines
in these languages to do model analysis and prediction.
For example, you can use `this IPython notebook <https://github.com/dmlc/xgboost/tree/master/demo/distributed-training/plot_model.ipynb>`_
to plot feature importance and visualize the learnt model.
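As a minimal sketch (the file name is hypothetical; download one of the saved models listed by ``s3cmd`` above first):

.. code-block:: python

import xgboost as xgb

bst = xgb.Booster()
bst.load_model('0009.model')  # hypothetical file fetched from the S3 model folder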
***************
Troubleshooting
***************
If you encounter a problem, the best way might be to use the following command
to get the stdout and stderr logs of the containers and check what caused the problem.
.. code-block:: bash
yarn logs -applicationId yourAppId
*****************
Future Directions
*****************
You have learned to use distributed XGBoost on YARN in this tutorial.
XGBoost is a portable and scalable framework for gradient boosting.
You can check out more examples and resources in the `resources page <https://github.com/dmlc/xgboost/blob/master/demo/README.md>`_.
The project goal is to make the best scalable machine learning solution available to all platforms.
The API is designed to be portable, and the same code can also run on other platforms such as MPI and SGE.
XGBoost is actively evolving and we are working on even more exciting features
such as distributed XGBoost Python/R packages.

View File

@@ -1,177 +0,0 @@
###############################
Feature Interaction Constraints
###############################
The decision tree is a powerful tool to discover interaction among independent
variables (features). Variables that appear together in a traversal path
are interacting with one another, since the condition of a child node is
predicated on the condition of the parent node. For example, the highlighted
red path in the diagram below contains three variables: :math:`x_1`, :math:`x_7`,
and :math:`x_{10}`, so the highlighted prediction (at the highlighted leaf node)
is the product of interaction between :math:`x_1`, :math:`x_7`, and
:math:`x_{10}`.
.. plot::
:nofigs:
from graphviz import Source
source = r"""
digraph feature_interaction_illustration1 {
graph [fontname = "helvetica"];
node [fontname = "helvetica"];
edge [fontname = "helvetica"];
0 [label=<x<SUB><FONT POINT-SIZE="11">10</FONT></SUB> &lt; -1.5 ?>, shape=box, color=red, fontcolor=red];
1 [label=<x<SUB><FONT POINT-SIZE="11">2</FONT></SUB> &lt; 2 ?>, shape=box];
2 [label=<x<SUB><FONT POINT-SIZE="11">7</FONT></SUB> &lt; 0.3 ?>, shape=box, color=red, fontcolor=red];
3 [label="...", shape=none];
4 [label="...", shape=none];
5 [label=<x<SUB><FONT POINT-SIZE="11">1</FONT></SUB> &lt; 0.5 ?>, shape=box, color=red, fontcolor=red];
6 [label="...", shape=none];
7 [label="...", shape=none];
8 [label="Predict +1.3", color=red, fontcolor=red];
0 -> 1 [labeldistance=2.0, labelangle=45, headlabel="Yes/Missing "];
0 -> 2 [labeldistance=2.0, labelangle=-45,
headlabel="No", color=red, fontcolor=red];
1 -> 3 [labeldistance=2.0, labelangle=45, headlabel="Yes"];
1 -> 4 [labeldistance=2.0, labelangle=-45, headlabel=" No/Missing"];
2 -> 5 [labeldistance=2.0, labelangle=-45, headlabel="Yes",
color=red, fontcolor=red];
2 -> 6 [labeldistance=2.0, labelangle=-45, headlabel=" No/Missing"];
5 -> 7;
5 -> 8 [color=red];
}
"""
Source(source, format='png').render('../_static/feature_interaction_illustration1', view=False)
Source(source, format='svg').render('../_static/feature_interaction_illustration1', view=False)
.. raw:: html
<p>
<img src="../_static/feature_interaction_illustration1.svg"
onerror="this.src='../_static/feature_interaction_illustration1.png'; this.onerror=null;">
</p>
When the tree depth is larger than one, many variables interact on
the sole basis of minimizing training loss, and the resulting decision tree may
capture a spurious relationship (noise) rather than a legitimate relationship
that generalizes across different datasets. **Feature interaction constraints**
allow users to decide which variables are allowed to interact and which are not.
Potential benefits include:
* Better predictive performance from focusing on interactions that work --
whether through domain specific knowledge or algorithms that rank interactions
* Less noise in predictions; better generalization
* More control to the user on what the model can fit. For example, the user may
want to exclude some interactions even if they perform well due to regulatory
constraints
****************
A Simple Example
****************
Feature interaction constraints are expressed in terms of groups of variables
that are allowed to interact. For example, the constraint
``[0, 1]`` indicates that variables :math:`x_0` and :math:`x_1` are allowed to
interact with each other but with no other variable. Similarly, ``[2, 3, 4]``
indicates that :math:`x_2`, :math:`x_3`, and :math:`x_4` are allowed to
interact with one another but with no other variable. A set of feature
interaction constraints is expressed as a nested list, e.g.
``[[0, 1], [2, 3, 4]]``, where each inner list is a group of indices of features
that are allowed to interact with each other.
In the following diagram, the left decision tree is in violation of the first
constraint (``[0, 1]``), whereas the right decision tree complies with both the
first and second constraints (``[0, 1]``, ``[2, 3, 4]``).
.. plot::
:nofigs:
from graphviz import Source
source = r"""
digraph feature_interaction_illustration2 {
graph [fontname = "helvetica"];
node [fontname = "helvetica"];
edge [fontname = "helvetica"];
0 [label=<x<SUB><FONT POINT-SIZE="11">0</FONT></SUB> &lt; 5.0 ?>, shape=box];
1 [label=<x<SUB><FONT POINT-SIZE="11">2</FONT></SUB> &lt; -3.0 ?>, shape=box];
2 [label="+0.6"];
3 [label="-0.4"];
4 [label="+1.2"];
0 -> 1 [labeldistance=2.0, labelangle=45, headlabel="Yes/Missing "];
0 -> 2 [labeldistance=2.0, labelangle=-45, headlabel="No"];
1 -> 3 [labeldistance=2.0, labelangle=45, headlabel="Yes"];
1 -> 4 [labeldistance=2.0, labelangle=-45, headlabel=" No/Missing"];
}
"""
Source(source, format='png').render('../_static/feature_interaction_illustration2', view=False)
Source(source, format='svg').render('../_static/feature_interaction_illustration2', view=False)
.. plot::
:nofigs:
from graphviz import Source
source = r"""
digraph feature_interaction_illustration3 {
graph [fontname = "helvetica"];
node [fontname = "helvetica"];
edge [fontname = "helvetica"];
0 [label=<x<SUB><FONT POINT-SIZE="11">3</FONT></SUB> &lt; 2.5 ?>, shape=box];
1 [label="+1.6"];
2 [label=<x<SUB><FONT POINT-SIZE="11">2</FONT></SUB> &lt; -1.2 ?>, shape=box];
3 [label="+0.1"];
4 [label="-0.3"];
0 -> 1 [labeldistance=2.0, labelangle=45, headlabel="Yes"];
0 -> 2 [labeldistance=2.0, labelangle=-45, headlabel=" No/Missing"];
2 -> 3 [labeldistance=2.0, labelangle=45, headlabel="Yes/Missing "];
2 -> 4 [labeldistance=2.0, labelangle=-45, headlabel="No"];
}
"""
Source(source, format='png').render('../_static/feature_interaction_illustration3', view=False)
Source(source, format='svg').render('../_static/feature_interaction_illustration3', view=False)
.. raw:: html
<p>
<img src="../_static/feature_interaction_illustration2.svg"
onerror="this.src='../_static/feature_interaction_illustration2.png'; this.onerror=null;">
<img src="../_static/feature_interaction_illustration3.svg"
onerror="this.src='../_static/feature_interaction_illustration3.png'; this.onerror=null;">
</p>
****************************************************
Enforcing Feature Interaction Constraints in XGBoost
****************************************************
It is very simple to enforce feature interaction constraints in XGBoost. Here we will
give an example using Python, but the same general idea generalizes to other
platforms.
Suppose the following code fits your model without feature interaction constraints:
.. code-block:: python
model_no_constraints = xgb.train(params, dtrain,
num_boost_round = 1000, evals = evallist,
early_stopping_rounds = 10)
Then fitting with feature interaction constraints only requires adding a single
parameter:
.. code-block:: python

    params_constrained = params.copy()
    # Use a nested list to define feature interaction constraints:
    # features 0 and 2 may interact with each other but with no other feature;
    # features 1, 3, 4 may interact with one another but with no other feature;
    # features 5 and 6 may interact with each other but with no other feature.
    params_constrained['interaction_constraints'] = '[[0, 2], [1, 3, 4], [5, 6]]'

    model_with_constraints = xgb.train(params_constrained, dtrain,
                                       num_boost_round=1000,
                                       evals=evallist,
                                       early_stopping_rounds=10)
**Choice of tree construction algorithm**. To use feature interaction
constraints, be sure to set the ``tree_method`` parameter to either ``exact``
or ``hist``. Currently, GPU algorithms (``gpu_hist``, ``gpu_exact``) do not
support feature interaction constraints.
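For example, a minimal sketch (reusing the hypothetical ``params_constrained``,
``dtrain``, and ``evallist`` objects from above) that selects the ``hist`` algorithm
before training:

.. code-block:: python

    params_constrained['tree_method'] = 'hist'  # 'exact' also works; GPU methods do not

    model_with_constraints = xgb.train(params_constrained, dtrain,
                                       num_boost_round=1000,
                                       evals=evallist,
                                       early_stopping_rounds=10)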
View File
@@ -14,7 +14,6 @@ See `Awesome XGBoost <https://github.com/dmlc/xgboost/tree/master/demo>`_ for mo
Distributed XGBoost with XGBoost4J-Spark <https://xgboost.readthedocs.io/en/latest/jvm/xgboost4j_spark_tutorial.html>
dart
monotonic
feature_interaction_constraint
input_format
param_tuning
external_memory
View File
@@ -82,7 +82,7 @@ Some other examples:
- ``(1,0)``: An increasing constraint on the first predictor and no constraint on the second.
- ``(0,-1)``: No constraint on the first predictor and a decreasing constraint on the second.
**Choice of tree construction algorithm**. To use monotonic constraints, be
**Choise of tree construction algorithm**. To use monotonic constraints, be
sure to set the ``tree_method`` parameter to one of ``exact``, ``hist``, and
``gpu_hist``.
View File
@@ -16,7 +16,7 @@
*/
#ifndef XGBOOST_STRICT_R_MODE
#define XGBOOST_STRICT_R_MODE 0
#endif // XGBOOST_STRICT_R_MODE
#endif
/*!
* \brief Whether always log console message with time.
@@ -26,21 +26,21 @@
*/
#ifndef XGBOOST_LOG_WITH_TIME
#define XGBOOST_LOG_WITH_TIME 1
#endif // XGBOOST_LOG_WITH_TIME
#endif
/*!
* \brief Whether customize the logger outputs.
*/
#ifndef XGBOOST_CUSTOMIZE_LOGGER
#define XGBOOST_CUSTOMIZE_LOGGER XGBOOST_STRICT_R_MODE
#endif // XGBOOST_CUSTOMIZE_LOGGER
#endif
/*!
* \brief Whether to customize global PRNG.
*/
#ifndef XGBOOST_CUSTOMIZE_GLOBAL_PRNG
#define XGBOOST_CUSTOMIZE_GLOBAL_PRNG XGBOOST_STRICT_R_MODE
#endif // XGBOOST_CUSTOMIZE_GLOBAL_PRNG
#endif
/*!
* \brief Check if alignas(*) keyword is supported. (g++ 4.8 or higher)
@@ -49,7 +49,7 @@
#define XGBOOST_ALIGNAS(X) alignas(X)
#else
#define XGBOOST_ALIGNAS(X)
#endif // defined(__GNUC__) && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ > 4)
#endif
#if defined(__GNUC__) && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ > 4) && \
!defined(__CUDACC__)
@@ -64,7 +64,7 @@
#else
#define XGBOOST_PARALLEL_SORT(X, Y, Z) std::sort((X), (Y), (Z))
#define XGBOOST_PARALLEL_STABLE_SORT(X, Y, Z) std::stable_sort((X), (Y), (Z))
#endif // GLIBC VERSION
#endif
/*!
* \brief Tag function as usable by device
@@ -73,7 +73,7 @@
#define XGBOOST_DEVICE __host__ __device__
#else
#define XGBOOST_DEVICE
#endif // defined (__CUDA__) || defined(__NVCC__)
#endif
/*! \brief namespace of xgboost*/
namespace xgboost {
@@ -215,11 +215,7 @@ using bst_omp_uint = dmlc::omp_uint; // NOLINT
#if __GNUC__ == 4 && __GNUC_MINOR__ < 8
#define override
#define final
#endif // __GNUC__ == 4 && __GNUC_MINOR__ < 8
#endif // DMLC_USE_CXX11 && defined(__GNUC__) && !defined(__clang_version__)
#endif
#endif
} // namespace xgboost
/* Always keep this #include at the bottom of xgboost/base.h */
#include <xgboost/build_config.h>
#endif // XGBOOST_BASE_H_
View File
@@ -1,18 +0,0 @@
/*!
* Copyright 2019 by Contributors
* \file build_config.h
*/
#ifndef XGBOOST_BUILD_CONFIG_H_
#define XGBOOST_BUILD_CONFIG_H_
/* default logic for software pre-fetching */
#if (defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64))) || defined(__INTEL_COMPILER)
// Enable _mm_prefetch for Intel compiler and MSVC+x86
#define XGBOOST_MM_PREFETCH_PRESENT
#define XGBOOST_BUILTIN_PREFETCH_PRESENT
#elif defined(__GNUC__)
// Enable __builtin_prefetch for GCC
#define XGBOOST_BUILTIN_PREFETCH_PRESENT
#endif // GUARDS
#endif // XGBOOST_BUILD_CONFIG_H_
View File
@@ -10,12 +10,11 @@
#ifdef __cplusplus
#define XGB_EXTERN_C extern "C"
#include <cstdio>
#include <cstdint>
#else
#define XGB_EXTERN_C
#include <stdio.h>
#include <stdint.h>
#endif // __cplusplus
#endif
// XGBoost C API will include APIs in Rabit C API
#include <rabit/c_api.h>
@@ -24,7 +23,7 @@
#define XGB_DLL XGB_EXTERN_C __declspec(dllexport)
#else
#define XGB_DLL XGB_EXTERN_C
#endif // defined(_MSC_VER) || defined(_WIN32)
#endif
// manually define unsigned long
typedef uint64_t bst_ulong; // NOLINT(*)
@@ -50,7 +49,7 @@ typedef struct { // NOLINT(*)
long* offset; // NOLINT(*)
#else
int64_t* offset; // NOLINT(*)
#endif // __APPLE__
#endif
/*! \brief labels of each instance */
float* label;
/*! \brief weight of each instance, can be NULL */
@@ -563,7 +562,7 @@ XGB_DLL int XGBoosterGetAttr(BoosterHandle handle,
*
* \param handle handle
* \param key The key of the attribute.
* \param value The value to be saved.
* \param value The value to be saved.
* If nullptr, the attribute would be deleted.
* \return 0 when success, -1 when failure happens
*/
View File
@@ -9,18 +9,12 @@
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <cstring>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <vector>
#include "./base.h"
#include "../../src/common/span.h"
#include "../../src/common/group_data.h"
#include "../../src/common/host_device_vector.h"
namespace xgboost {
// forward declare learner.
@@ -46,7 +40,7 @@ class MetaInfo {
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0};
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_;
std::vector<bst_float> labels_;
/*!
* \brief specified root index of each instance,
* can be used for multi task setting
@@ -58,7 +52,7 @@ class MetaInfo {
*/
std::vector<bst_uint> group_ptr_;
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_;
std::vector<bst_float> weights_;
/*! \brief session-id of each instance, optional */
std::vector<uint64_t> qids_;
/*!
@@ -66,7 +60,7 @@ class MetaInfo {
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_;
std::vector<bst_float> base_margin_;
/*! \brief version flag, used to check version of this info */
static const int kVersion = 2;
/*! \brief version that introduced qid field */
@@ -79,7 +73,7 @@ class MetaInfo {
* \return The weight.
*/
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
return weights_.size() != 0 ? weights_[i] : 1.0f;
}
/*!
* \brief Get the root index of i-th instance.
@@ -91,12 +85,12 @@ class MetaInfo {
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
inline const std::vector<size_t>& LabelAbsSort() const {
if (label_order_cache_.size() == labels_.Size()) {
if (label_order_cache_.size() == labels_.size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
label_order_cache_.resize(labels_.size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
const auto l = labels_;
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
@@ -139,7 +133,7 @@ struct Entry {
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
* \param fvalue THe feature value.
*/
Entry(bst_uint index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief reversely compare feature values */
@@ -152,34 +146,33 @@ struct Entry {
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
* \brief in-memory storage unit of sparse batch
*/
class SparsePage {
public:
// Offset for each row.
HostDeviceVector<size_t> offset;
std::vector<size_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
std::vector<Entry> data;
size_t base_rowid;
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
struct Inst {
/*! \brief pointer to the elements*/
const Entry *data{nullptr};
/*! \brief length of the instance */
bst_uint length{0};
/*! \brief constructor */
Inst() = default;
Inst(const Entry *data, bst_uint length) : data(data), length(length) {}
/*! \brief get i-th pair in the sparse vector*/
inline const Entry& operator[](size_t i) const {
return data[i];
}
};
/*! \brief get i-th row from the batch */
inline Inst operator[](size_t i) const {
const auto& data_vec = data.HostVector();
const auto& offset_vec = offset.HostVector();
size_t size;
// in distributed mode, some partitions may not get any instance for a feature. Therefore
// we should set the size as zero
if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
size = 0;
} else {
size = offset_vec[i + 1] - offset_vec[i];
}
return {data_vec.data() + offset_vec[i],
static_cast<Inst::index_type>(size)};
return {data.data() + offset[i], static_cast<bst_uint>(offset[i + 1] - offset[i])};
}
/*! \brief constructor */
@@ -188,153 +181,72 @@ class SparsePage {
}
/*! \return number of instance in the page */
inline size_t Size() const {
return offset.Size() - 1;
return offset.size() - 1;
}
/*! \return estimation of memory cost of this page */
inline size_t MemCostBytes() const {
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
return offset.size() * sizeof(size_t) + data.size() * sizeof(Entry);
}
/*! \brief clear the page */
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
offset_vec.push_back(0);
data.HostVector().clear();
}
SparsePage GetTranspose(int num_columns) const {
SparsePage transpose;
common::ParallelGroupBuilder<Entry> builder(&transpose.offset.HostVector(),
&transpose.data.HostVector());
const int nthread = omp_get_max_threads();
builder.InitBudget(num_columns, nthread);
long batch_size = static_cast<long>(this->Size()); // NOLINT(*)
#pragma omp parallel for schedule(static)
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
int tid = omp_get_thread_num();
auto inst = (*this)[i];
for (bst_uint j = 0; j < inst.size(); ++j) {
builder.AddBudget(inst[j].index, tid);
}
}
builder.InitStorage();
#pragma omp parallel for schedule(static)
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
int tid = omp_get_thread_num();
auto inst = (*this)[i];
for (bst_uint j = 0; j < inst.size(); ++j) {
builder.Push(
inst[j].index,
Entry(static_cast<bst_uint>(this->base_rowid + i), inst[j].fvalue),
tid);
}
}
return transpose;
}
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
}
offset.clear();
offset.push_back(0);
data.clear();
}
/*!
* \brief Push row block into the page.
* \param batch the row batch.
*/
void Push(const dmlc::RowBlock<uint32_t>& batch);
inline void Push(const dmlc::RowBlock<uint32_t>& batch) {
data.reserve(data.size() + batch.offset[batch.size] - batch.offset[0]);
offset.reserve(offset.size() + batch.size);
CHECK(batch.index != nullptr);
for (size_t i = 0; i < batch.size; ++i) {
offset.push_back(offset.back() + batch.offset[i + 1] - batch.offset[i]);
}
for (size_t i = batch.offset[0]; i < batch.offset[batch.size]; ++i) {
uint32_t index = batch.index[i];
bst_float fvalue = batch.value == nullptr ? 1.0f : batch.value[i];
data.emplace_back(index, fvalue);
}
CHECK_EQ(offset.back(), data.size());
}
/*!
* \brief Push a sparse page
* \param batch the row page
*/
void Push(const SparsePage &batch);
/*!
* \brief Push a SparsePage stored in CSC format
* \param batch The row batch to be pushed
*/
void PushCSC(const SparsePage& batch);
inline void Push(const SparsePage &batch) {
size_t top = offset.back();
data.resize(top + batch.data.size());
std::memcpy(dmlc::BeginPtr(data) + top,
dmlc::BeginPtr(batch.data),
sizeof(Entry) * batch.data.size());
size_t begin = offset.size();
offset.resize(begin + batch.Size());
for (size_t i = 0; i < batch.Size(); ++i) {
offset[i + begin] = top + batch.offset[i + 1];
}
}
/*!
* \brief Push one instance into page
* \param inst an instance row
*/
inline void Push(const Inst &inst) {
auto& data_vec = data.HostVector();
auto& offset_vec = offset.HostVector();
offset_vec.push_back(offset_vec.back() + inst.size());
size_t begin = data_vec.size();
data_vec.resize(begin + inst.size());
if (inst.size() != 0) {
std::memcpy(dmlc::BeginPtr(data_vec) + begin, inst.data(),
sizeof(Entry) * inst.size());
offset.push_back(offset.back() + inst.length);
size_t begin = data.size();
data.resize(begin + inst.length);
if (inst.length != 0) {
std::memcpy(dmlc::BeginPtr(data) + begin, inst.data,
sizeof(Entry) * inst.length);
}
}
size_t Size() { return offset.Size() - 1; }
size_t Size() { return offset.size() - 1; }
};
class BatchIteratorImpl {
public:
virtual ~BatchIteratorImpl() {}
virtual BatchIteratorImpl* Clone() = 0;
virtual const SparsePage& operator*() const = 0;
virtual void operator++() = 0;
virtual bool AtEnd() const = 0;
};
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag;
explicit BatchIterator(BatchIteratorImpl* impl) { impl_.reset(impl); }
BatchIterator(const BatchIterator& other) {
if (other.impl_) {
impl_.reset(other.impl_->Clone());
} else {
impl_.reset();
}
}
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
const SparsePage& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
bool operator!=(const BatchIterator& rhs) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
std::unique_ptr<BatchIteratorImpl> impl_;
};
class BatchSet {
public:
explicit BatchSet(BatchIterator begin_iter) : begin_iter_(begin_iter) {}
BatchIterator begin() { return begin_iter_; }
BatchIterator end() { return BatchIterator(nullptr); }
private:
BatchIterator begin_iter_;
};
/*!
* \brief This is data structure that user can pass to DMatrix::Create
@@ -405,17 +317,32 @@ class DMatrix {
virtual MetaInfo& Info() = 0;
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/**
* \brief Gets row batches. Use range based for loop over BatchSet to access individual batches.
/*!
* \brief get the row iterator, reset to beginning position
* \note Only either RowIterator or column Iterator can be active.
*/
virtual BatchSet GetRowBatches() = 0;
virtual BatchSet GetSortedColumnBatches() = 0;
virtual BatchSet GetColumnBatches() = 0;
virtual dmlc::DataIter<SparsePage>* RowIterator() = 0;
/*!\brief get column iterator, reset to the beginning position */
virtual dmlc::DataIter<SparsePage>* ColIterator() = 0;
/*!
* \brief check if column access is supported, if not, initialize column access.
* \param max_row_perbatch auxiliary information, maximum row used in each column batch.
* this is a hint information that can be ignored by the implementation.
* \param sorted If column features should be in sorted order
* \return Number of column blocks in the column access.
*/
virtual void InitColAccess(size_t max_row_perbatch, bool sorted) = 0;
// the following are column meta data, should be able to answer them fast.
/*! \return whether column access is enabled */
virtual bool HaveColAccess(bool sorted) const = 0;
/*! \return Whether the data columns single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief get number of non-missing entries in column */
virtual size_t GetColSize(size_t cidx) const = 0;
/*! \brief get column density */
virtual float GetColDensity(size_t cidx) = 0;
virtual float GetColDensity(size_t cidx) const = 0;
/*! \return reference of buffered rowset, in column access */
virtual const RowSet& BufferedRowset() const = 0;
/*! \brief virtual destructor */
virtual ~DMatrix() = default;
/*!
@@ -462,6 +389,12 @@ class DMatrix {
*/
static DMatrix* Create(dmlc::Parser<uint32_t>* parser,
const std::string& cache_prefix = "");
private:
// allow learner class to access this field.
friend class LearnerImpl;
/*! \brief public field to back ref cached matrix. */
LearnerImpl* cache_learner_ptr_{nullptr};
};
// implementation of inline functions
View File
@@ -10,8 +10,6 @@
#include <rabit/rabit.h>
#include <utility>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "./base.h"
@@ -180,12 +178,6 @@ class Learner : public rabit::Serializable {
*/
static Learner* Create(const std::vector<std::shared_ptr<DMatrix> >& cache_data);
/*!
* \brief Get configuration arguments currently stored by the learner
* \return Key-value pairs representing configuration arguments
*/
virtual const std::map<std::string, std::string>& GetConfigurationArguments() const = 0;
protected:
/*! \brief internal base score of the model */
bst_float base_score_;
View File
@@ -9,13 +9,8 @@
#define XGBOOST_LOGGING_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <dmlc/thread_local.h>
#include <sstream>
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "./base.h"
namespace xgboost {
@@ -25,7 +20,7 @@ class BaseLogger {
BaseLogger() {
#if XGBOOST_LOG_WITH_TIME
log_stream_ << "[" << dmlc::DateLogger().HumanDate() << "] ";
#endif // XGBOOST_LOG_WITH_TIME
#endif
}
std::ostream& stream() { return log_stream_; } // NOLINT
@@ -33,55 +28,8 @@ class BaseLogger {
std::ostringstream log_stream_;
};
// Parsing both silent and debug_verbose is to provide backward compatibility.
struct ConsoleLoggerParam : public dmlc::Parameter<ConsoleLoggerParam> {
bool silent; // deprecated.
int verbosity;
DMLC_DECLARE_PARAMETER(ConsoleLoggerParam) {
DMLC_DECLARE_FIELD(silent)
.set_default(false)
.describe("Do not print information during training.");
DMLC_DECLARE_FIELD(verbosity)
.set_range(0, 3)
.set_default(1) // shows only warning
.describe("Flag to print out detailed breakdown of runtime.");
DMLC_DECLARE_ALIAS(verbosity, debug_verbose);
}
};
class ConsoleLogger : public BaseLogger {
public:
enum class LogVerbosity {
kSilent = 0,
kWarning = 1,
kInfo = 2, // information may interests users.
kDebug = 3, // information only interesting to developers.
kIgnore = 4 // ignore global setting
};
using LV = LogVerbosity;
private:
static LogVerbosity global_verbosity_;
static ConsoleLoggerParam param_;
LogVerbosity cur_verbosity_;
static void Configure(const std::map<std::string, std::string>& args);
public:
template <typename ArgIter>
static void Configure(ArgIter begin, ArgIter end) {
std::map<std::string, std::string> args(begin, end);
Configure(args);
}
static LogVerbosity GlobalVerbosity();
static LogVerbosity DefaultVerbosity();
static bool ShouldLog(LogVerbosity verbosity);
ConsoleLogger() = delete;
explicit ConsoleLogger(LogVerbosity cur_verb);
ConsoleLogger(const std::string& file, int line, LogVerbosity cur_verb);
~ConsoleLogger();
};
@@ -116,47 +64,17 @@ class LogCallbackRegistry {
return nullptr;
}
};
#endif // !defined(XGBOOST_STRICT_R_MODE) || XGBOOST_STRICT_R_MODE == 0
#endif
using LogCallbackRegistryStore = dmlc::ThreadLocalStore<LogCallbackRegistry>;
// Redefines LOG_WARNING for controling verbosity
#if defined(LOG_WARNING)
#undef LOG_WARNING
#endif // defined(LOG_WARNING)
#define LOG_WARNING \
if (::xgboost::ConsoleLogger::ShouldLog( \
::xgboost::ConsoleLogger::LV::kWarning)) \
::xgboost::ConsoleLogger(__FILE__, __LINE__, \
::xgboost::ConsoleLogger::LogVerbosity::kWarning)
// Redefines LOG_INFO for controling verbosity
#if defined(LOG_INFO)
#undef LOG_INFO
#endif // defined(LOG_INFO)
#define LOG_INFO \
if (::xgboost::ConsoleLogger::ShouldLog( \
::xgboost::ConsoleLogger::LV::kInfo)) \
::xgboost::ConsoleLogger(__FILE__, __LINE__, \
::xgboost::ConsoleLogger::LogVerbosity::kInfo)
#if defined(LOG_DEBUG)
#undef LOG_DEBUG
#endif // defined(LOG_DEBUG)
#define LOG_DEBUG \
if (::xgboost::ConsoleLogger::ShouldLog( \
::xgboost::ConsoleLogger::LV::kDebug)) \
::xgboost::ConsoleLogger(__FILE__, __LINE__, \
::xgboost::ConsoleLogger::LogVerbosity::kDebug)
// redefines the logging macro if not existed
#ifndef LOG
#define LOG(severity) LOG_##severity.stream()
#endif // LOG
#endif
// Enable LOG(CONSOLE) for print messages to console.
#define LOG_CONSOLE ::xgboost::ConsoleLogger( \
::xgboost::ConsoleLogger::LogVerbosity::kIgnore)
#define LOG_CONSOLE ::xgboost::ConsoleLogger()
// Enable LOG(TRACKER) for print messages to tracker
#define LOG_TRACKER ::xgboost::TrackerLogger()
} // namespace xgboost.
View File
@@ -11,11 +11,8 @@
#include <vector>
#include <string>
#include <functional>
#include <utility>
#include "./data.h"
#include "./base.h"
#include "../../src/common/host_device_vector.h"
namespace xgboost {
/*!
@@ -24,23 +21,6 @@ namespace xgboost {
*/
class Metric {
public:
/*!
* \brief Configure the Metric with the specified parameters.
* \param args arguments to the objective function.
*/
virtual void Configure(
const std::vector<std::pair<std::string, std::string> >& args) {}
/*!
* \brief set configuration from pair iterators.
* \param begin The beginning iterator.
* \param end The end iterator.
* \tparam PairIter iterator<std::pair<std::string, std::string> >
*/
template<typename PairIter>
inline void Configure(PairIter begin, PairIter end) {
std::vector<std::pair<std::string, std::string> > vec(begin, end);
this->Configure(vec);
}
/*!
* \brief evaluate a specific metric
* \param preds prediction
@@ -49,9 +29,9 @@ class Metric {
* the average statistics across all the node,
* this is only supported by some metrics
*/
virtual bst_float Eval(const HostDeviceVector<bst_float>& preds,
virtual bst_float Eval(const std::vector<bst_float>& preds,
const MetaInfo& info,
bool distributed) = 0;
bool distributed) const = 0;
/*! \return name of metric */
virtual const char* Name() const = 0;
/*! \brief virtual destructor */
View File
@@ -44,7 +44,7 @@ class ObjFunction {
* \param iteration current iteration number.
* \param out_gpair output of get gradient, saves gradient and second order gradient in
*/
virtual void GetGradient(const HostDeviceVector<bst_float>& preds,
virtual void GetGradient(HostDeviceVector<bst_float>* preds,
const MetaInfo& info,
int iteration,
HostDeviceVector<GradientPair>* out_gpair) = 0;
View File
@@ -7,14 +7,11 @@
#pragma once
#include <xgboost/base.h>
#include <xgboost/data.h>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "../../src/gbm/gbtree_model.h"
#include "../../src/common/host_device_vector.h"
File diff suppressed because it is too large
View File
@@ -19,7 +19,7 @@ CONFIG = {
"USE_AZURE": "OFF",
"USE_S3": "OFF",
"USE_CUDA": "OFF",
"PLUGIN_UPDATER_GPU": "OFF",
"JVM_BINDINGS": "ON"
}
View File
@@ -17,5 +17,5 @@ rm /usr/bin/python
ln -s /opt/rh/python27/root/usr/bin/python /usr/bin/python
# build xgboost
cd /xgboost/jvm-packages;ulimit -c unlimited;mvn package
cd /xgboost/jvm-packages;mvn package
View File
@@ -6,7 +6,7 @@
<groupId>ml.dmlc</groupId>
<artifactId>xgboost-jvm</artifactId>
<version>0.82-SNAPSHOT</version>
<version>0.80</version>
<packaging>pom</packaging>
<name>XGBoost JVM Package</name>
<description>JVM Package for XGBoost</description>
@@ -23,19 +23,14 @@
<email>codingcat@apache.org</email>
</developer>
</developers>
<scm>
<connection>scm:git:git:/github.com/dmlc/xgboost.git</connection>
<developerConnection>scm:git:ssh://github.com/dmlc/xgboost.git</developerConnection>
<url>https://github.com/dmlc/xgboost</url>
</scm>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<maven.compiler.source>1.7</maven.compiler.source>
<maven.compiler.target>1.7</maven.compiler.target>
<flink.version>1.5.0</flink.version>
<spark.version>2.3.3</spark.version>
<scala.version>2.11.12</scala.version>
<flink.version>0.10.2</flink.version>
<spark.version>2.3.0</spark.version>
<scala.version>2.11.8</scala.version>
<scala.binary.version>2.11</scala.binary.version>
</properties>
<repositories>
@@ -237,7 +232,7 @@
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<groupId>net.alchim31.maven</groupId>
<artifactId>maven-site-plugin</artifactId>
<version>3.0</version>
<configuration>
@@ -319,7 +314,6 @@
<version>2.19.1</version>
<configuration>
<skipTests>false</skipTests>
<useSystemClassLoader>false</useSystemClassLoader>
</configuration>
</plugin>
<plugin>
@@ -335,6 +329,25 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>0.7.9</version>
<executions>
<execution>
<goals>
<goal>prepare-agent</goal>
</goals>
</execution>
<execution>
<id>report</id>
<phase>test</phase>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
<dependencies>
View File
@@ -23,7 +23,7 @@ XGBoost4J Code Examples
* [External Memory](src/main/scala/ml/dmlc/xgboost4j/scala/example/ExternalMemory.scala)
## Spark API
* [Distributed Training with Spark](src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/SparkMLlibPipeline.scala)
* [Distributed Training with Spark](src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/SparkWithDataFrame.scala)
## Flink API
* [Distributed Training with Flink](src/main/scala/ml/dmlc/xgboost4j/scala/example/flink/DistTrainWithFlink.scala)
View File
@@ -6,10 +6,10 @@
<parent>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost-jvm</artifactId>
<version>0.82-SNAPSHOT</version>
<version>0.80</version>
</parent>
<artifactId>xgboost4j-example</artifactId>
<version>0.82-SNAPSHOT</version>
<version>0.80</version>
<packaging>jar</packaging>
<build>
<plugins>
@@ -26,7 +26,7 @@
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-spark</artifactId>
<version>0.82-SNAPSHOT</version>
<version>0.80</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
@@ -37,7 +37,7 @@
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-flink</artifactId>
<version>0.82-SNAPSHOT</version>
<version>0.80</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
View File
@@ -31,7 +31,7 @@ object SparkMLlibPipeline {
def main(args: Array[String]): Unit = {
if (args.length != 3) {
if (args.length != 1) {
println("Usage: SparkMLlibPipeline input_path native_model_path pipeline_model_path")
sys.exit(1)
}
@@ -79,8 +79,6 @@ object SparkMLlibPipeline {
"num_workers" -> 2
)
)
booster.setFeaturesCol("features")
booster.setLabelCol("classIndex")
val labelConverter = new IndexToString()
.setInputCol("prediction")
.setOutputCol("realLabel")
@@ -96,8 +94,6 @@ object SparkMLlibPipeline {
// Model evaluation
val evaluator = new MulticlassClassificationEvaluator()
evaluator.setLabelCol("classIndex")
evaluator.setPredictionCol("prediction")
val accuracy = evaluator.evaluate(prediction)
println("The model accuracy is : " + accuracy)
View File
@@ -31,6 +31,7 @@ object SparkTraining {
println("Usage: program input_path")
sys.exit(1)
}
val spark = SparkSession.builder().getOrCreate()
val inputPath = args(0)
val schema = new StructType(Array(
@@ -39,7 +40,7 @@ object SparkTraining {
StructField("petal length", DoubleType, true),
StructField("petal width", DoubleType, true),
StructField("class", StringType, true)))
val rawInput = spark.read.schema(schema).csv(inputPath)
val rawInput = spark.read.schema(schema).csv(args(0))
// transform class to index to make xgboost happy
val stringIndexer = new StringIndexer()
@@ -54,8 +55,6 @@ object SparkTraining {
val xgbInput = vectorAssembler.transform(labelTransformed).select("features",
"classIndex")
val Array(train, eval1, eval2, test) = xgbInput.randomSplit(Array(0.6, 0.2, 0.1, 0.1))
/**
* setup "timeout_request_workers" -> 60000L to make this application if it cannot get enough resources
* to get 2 workers within 60000 ms
@@ -68,13 +67,12 @@ object SparkTraining {
"objective" -> "multi:softprob",
"num_class" -> 3,
"num_round" -> 100,
"num_workers" -> 2,
"eval_sets" -> Map("eval1" -> eval1, "eval2" -> eval2))
"num_workers" -> 2)
val xgbClassifier = new XGBoostClassifier(xgbParam).
setFeaturesCol("features").
setLabelCol("classIndex")
val xgbClassificationModel = xgbClassifier.fit(train)
val results = xgbClassificationModel.transform(test)
val xgbClassificationModel = xgbClassifier.fit(xgbInput)
val results = xgbClassificationModel.transform(xgbInput)
results.show()
}
}
View File
@@ -6,10 +6,10 @@
<parent>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost-jvm</artifactId>
<version>0.82-SNAPSHOT</version>
<version>0.80</version>
</parent>
<artifactId>xgboost4j-flink</artifactId>
<version>0.82-SNAPSHOT</version>
<version>0.80</version>
<build>
<plugins>
<plugin>
@@ -26,7 +26,7 @@
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j</artifactId>
<version>0.82-SNAPSHOT</version>
<version>0.80</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
@@ -48,11 +48,6 @@
<artifactId>flink-ml_${scala.binary.version}</artifactId>
<version>${flink.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>2.7.3</version>
</dependency>
</dependencies>
</project>
View File
@@ -6,7 +6,7 @@
<parent>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost-jvm</artifactId>
<version>0.82-SNAPSHOT</version>
<version>0.80</version>
</parent>
<artifactId>xgboost4j-spark</artifactId>
<build>
@@ -24,7 +24,7 @@
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j</artifactId>
<version>0.82-SNAPSHOT</version>
<version>0.80</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
View File
@@ -63,9 +63,8 @@ private[spark] class CheckpointManager(sc: SparkContext, checkpointPath: String)
if (versions.nonEmpty) {
val version = versions.max
val fullPath = getPath(version)
val inputStream = FileSystem.get(sc.hadoopConfiguration).open(new Path(fullPath))
logger.info(s"Start training from previous booster at $fullPath")
val booster = SXGBoost.loadModel(inputStream)
val booster = SXGBoost.loadModel(fullPath)
booster.booster.setVersion(version)
booster
} else {
@@ -82,9 +81,8 @@ private[spark] class CheckpointManager(sc: SparkContext, checkpointPath: String)
val fs = FileSystem.get(sc.hadoopConfiguration)
val prevModelPaths = getExistingVersions.map(version => new Path(getPath(version)))
val fullPath = getPath(checkpoint.getVersion)
val outputStream = fs.create(new Path(fullPath), true)
logger.info(s"Saving checkpoint model with version ${checkpoint.getVersion} to $fullPath")
checkpoint.saveModel(outputStream)
checkpoint.saveModel(fullPath)
prevModelPaths.foreach(path => fs.delete(path, true))
}
View File
@@ -20,11 +20,6 @@ import ml.dmlc.xgboost4j.{LabeledPoint => XGBLabeledPoint}
import org.apache.spark.ml.feature.{LabeledPoint => MLLabeledPoint}
import org.apache.spark.ml.linalg.{DenseVector, SparseVector, Vector, Vectors}
import org.apache.spark.ml.param.Param
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Column, DataFrame, Row}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.{FloatType, IntegerType}
object DataUtils extends Serializable {
private[spark] implicit class XGBLabeledPointFeatures(
@@ -72,38 +67,4 @@ object DataUtils extends Serializable {
XGBLabeledPoint(0.0f, v.indices, v.values.map(_.toFloat))
}
}
private[spark] def convertDataFrameToXGBLabeledPointRDDs(
labelCol: Column,
featuresCol: Column,
weight: Column,
baseMargin: Column,
group: Option[Column],
dataFrames: DataFrame*): Array[RDD[XGBLabeledPoint]] = {
val selectedColumns = group.map(groupCol => Seq(labelCol.cast(FloatType),
featuresCol,
weight.cast(FloatType),
groupCol.cast(IntegerType),
baseMargin.cast(FloatType))).getOrElse(Seq(labelCol.cast(FloatType),
featuresCol,
weight.cast(FloatType),
baseMargin.cast(FloatType)))
dataFrames.toArray.map {
df => df.select(selectedColumns: _*).rdd.map {
case Row(label: Float, features: Vector, weight: Float, group: Int, baseMargin: Float) =>
val (indices, values) = features match {
case v: SparseVector => (v.indices, v.values.map(_.toFloat))
case v: DenseVector => (null, v.values.map(_.toFloat))
}
XGBLabeledPoint(label, indices, values, weight, group, baseMargin)
case Row(label: Float, features: Vector, weight: Float, baseMargin: Float) =>
val (indices, values) = features match {
case v: SparseVector => (v.indices, v.values.map(_.toFloat))
case v: DenseVector => (null, v.values.map(_.toFloat))
}
XGBLabeledPoint(label, indices, values, weight, baseMargin = baseMargin)
}
}
}
}
View File
@@ -19,20 +19,18 @@ package ml.dmlc.xgboost4j.scala.spark
import java.io.File
import java.nio.file.Files
import scala.collection.mutable.ListBuffer
import scala.collection.{AbstractIterator, mutable}
import scala.collection.mutable
import scala.util.Random
import ml.dmlc.xgboost4j.java.{IRabitTracker, Rabit, XGBoostError, RabitTracker => PyRabitTracker}
import ml.dmlc.xgboost4j.scala.rabit.RabitTracker
import ml.dmlc.xgboost4j.scala.{XGBoost => SXGBoost, _}
import ml.dmlc.xgboost4j.{LabeledPoint => XGBLabeledPoint}
import org.apache.commons.io.FileUtils
import org.apache.commons.logging.LogFactory
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkContext, SparkParallelismTracker, TaskContext}
import org.apache.spark.sql.{DataFrame, SparkSession}
/**
@@ -54,17 +52,6 @@ object TrackerConf {
def apply(): TrackerConf = TrackerConf(0L, "python")
}
/**
* Traing data group in a RDD partition.
* @param groupId The group id
* @param points Array of XGBLabeledPoint within the same group.
* @param isEdgeGroup whether it is a frist or last group in a RDD partition.
*/
private[spark] case class XGBLabeledPointGroup(
groupId: Int,
points: Array[XGBLabeledPoint],
isEdgeGroup: Boolean)
object XGBoost extends Serializable {
private val logger = LogFactory.getLog("XGBoostSpark")
@@ -86,66 +73,78 @@ object XGBoost extends Serializable {
}
}
private def removeMissingValuesWithGroup(
xgbLabelPointGroups: Iterator[Array[XGBLabeledPoint]],
missing: Float): Iterator[Array[XGBLabeledPoint]] = {
if (!missing.isNaN) {
xgbLabelPointGroups.map {
labeledPoints => XGBoost.removeMissingValues(labeledPoints.iterator, missing).toArray
private def fromBaseMarginsToArray(baseMargins: Iterator[Float]): Option[Array[Float]] = {
val builder = new mutable.ArrayBuilder.ofFloat()
var nTotal = 0
var nUndefined = 0
while (baseMargins.hasNext) {
nTotal += 1
val baseMargin = baseMargins.next()
if (baseMargin.isNaN) {
nUndefined += 1 // don't waste space for all-NaNs.
} else {
builder += baseMargin
}
} else {
xgbLabelPointGroups
}
}
private def getCacheDirName(useExternalMemory: Boolean): Option[String] = {
val taskId = TaskContext.getPartitionId().toString
if (useExternalMemory) {
val dir = Files.createTempDirectory(s"${TaskContext.get().stageId()}-cache-$taskId")
Some(dir.toAbsolutePath.toString)
} else {
if (nUndefined == nTotal) {
None
} else if (nUndefined == 0) {
Some(builder.result())
} else {
throw new IllegalArgumentException(
s"Encountered a partition with $nUndefined NaN base margin values. " +
s"If you want to specify base margin, ensure all values are non-NaN.")
}
}
private def buildDistributedBooster(
watches: Watches,
private[spark] def buildDistributedBoosters(
data: RDD[XGBLabeledPoint],
params: Map[String, Any],
rabitEnv: java.util.Map[String, String],
round: Int,
obj: ObjectiveTrait,
eval: EvalTrait,
prevBooster: Booster): Iterator[(Booster, Map[String, Array[Float]])] = {
useExternalMemory: Boolean,
missing: Float,
prevBooster: Booster
): RDD[(Booster, Map[String, Array[Float]])] = {
val partitionedBaseMargin = data.map(_.baseMargin)
// to workaround the empty partitions in training dataset,
// this might not be the best efficient implementation, see
// (https://github.com/dmlc/xgboost/issues/1277)
if (watches.toMap("train").rowNum == 0) {
throw new XGBoostError(
s"detected an empty partition in the training data, partition ID:" +
s" ${TaskContext.getPartitionId()}")
}
val taskId = TaskContext.getPartitionId().toString
rabitEnv.put("DMLC_TASK_ID", taskId)
Rabit.init(rabitEnv)
try {
val numEarlyStoppingRounds = params.get("num_early_stopping_rounds")
.map(_.toString.toInt).getOrElse(0)
if (numEarlyStoppingRounds > 0) {
if (!params.contains("maximize_evaluation_metrics")) {
throw new IllegalArgumentException("maximize_evaluation_metrics has to be specified")
}
data.zipPartitions(partitionedBaseMargin) { (labeledPoints, baseMargins) =>
if (labeledPoints.isEmpty) {
throw new XGBoostError(
s"detected an empty partition in the training data, partition ID:" +
s" ${TaskContext.getPartitionId()}")
}
val metrics = Array.tabulate(watches.size)(_ => Array.ofDim[Float](round))
val booster = SXGBoost.train(watches.toMap("train"), params, round,
watches.toMap, metrics, obj, eval,
earlyStoppingRound = numEarlyStoppingRounds, prevBooster)
Iterator(booster -> watches.toMap.keys.zip(metrics).toMap)
} finally {
Rabit.shutdown()
watches.delete()
}
val taskId = TaskContext.getPartitionId().toString
val cacheDirName = if (useExternalMemory) {
val dir = Files.createTempDirectory(s"${TaskContext.get().stageId()}-cache-$taskId")
Some(dir.toAbsolutePath.toString)
} else {
None
}
rabitEnv.put("DMLC_TASK_ID", taskId)
Rabit.init(rabitEnv)
val watches = Watches(params,
removeMissingValues(labeledPoints, missing),
fromBaseMarginsToArray(baseMargins), cacheDirName)
try {
val numEarlyStoppingRounds = params.get("num_early_stopping_rounds")
.map(_.toString.toInt).getOrElse(0)
val metrics = Array.tabulate(watches.size)(_ => Array.ofDim[Float](round))
val booster = SXGBoost.train(watches.train, params, round,
watches.toMap, metrics, obj, eval,
earlyStoppingRound = numEarlyStoppingRounds, prevBooster)
Iterator(booster -> watches.toMap.keys.zip(metrics).toMap)
} finally {
Rabit.shutdown()
watches.delete()
}
}.cache()
}
private def overrideParamsAccordingToTaskCPUs(
@@ -175,109 +174,28 @@ object XGBoost extends Serializable {
tracker
}
class IteratorWrapper[T](arrayOfXGBLabeledPoints: Array[(String, Iterator[T])])
extends Iterator[(String, Iterator[T])] {
private var currentIndex = 0
override def hasNext: Boolean = currentIndex <= arrayOfXGBLabeledPoints.length - 1
override def next(): (String, Iterator[T]) = {
currentIndex += 1
arrayOfXGBLabeledPoints(currentIndex - 1)
}
}
private def coPartitionNoGroupSets(
trainingData: RDD[XGBLabeledPoint],
evalSets: Map[String, RDD[XGBLabeledPoint]],
nWorkers: Int) = {
// eval_sets is supposed to be set by the caller of [[trainDistributed]]
val allDatasets = Map("train" -> trainingData) ++ evalSets
val repartitionedDatasets = allDatasets.map{case (name, rdd) =>
if (rdd.getNumPartitions != nWorkers) {
(name, rdd.repartition(nWorkers))
} else {
(name, rdd)
}
}
repartitionedDatasets.foldLeft(trainingData.sparkContext.parallelize(
Array.fill[(String, Iterator[XGBLabeledPoint])](nWorkers)(null), nWorkers)){
case (rddOfIterWrapper, (name, rddOfIter)) =>
rddOfIterWrapper.zipPartitions(rddOfIter){
(itrWrapper, itr) =>
if (!itr.hasNext) {
logger.error("when specifying eval sets as dataframes, you have to ensure that " +
"the number of elements in each dataframe is larger than the number of workers")
throw new Exception("too few elements in evaluation sets")
}
val itrArray = itrWrapper.toArray
if (itrArray.head != null) {
new IteratorWrapper(itrArray :+ (name -> itr))
} else {
new IteratorWrapper(Array(name -> itr))
}
}
}
}
/**
* Check to see if Spark expects SSL encryption (`spark.ssl.enabled` set to true).
* If so, throw an exception unless this safety measure has been explicitly overridden
* via conf `xgboost.spark.ignoreSsl`.
*
* @param sc SparkContext for the training dataset. When looking for the confs, this method
* first checks for an active SparkSession. If one is not available, it falls back
* to this SparkContext.
* @return A tuple of the booster and the metrics used to build training summary
*/
private def validateSparkSslConf(sc: SparkContext): Unit = {
val (sparkSslEnabled: Boolean, xgboostSparkIgnoreSsl: Boolean) =
SparkSession.getActiveSession match {
case Some(ss) =>
(ss.conf.getOption("spark.ssl.enabled").getOrElse("false").toBoolean,
ss.conf.getOption("xgboost.spark.ignoreSsl").getOrElse("false").toBoolean)
case None =>
(sc.getConf.getBoolean("spark.ssl.enabled", false),
sc.getConf.getBoolean("xgboost.spark.ignoreSsl", false))
}
if (sparkSslEnabled) {
if (xgboostSparkIgnoreSsl) {
logger.warn(s"spark-xgboost is being run without encrypting data in transit! " +
s"Spark Conf spark.ssl.enabled=true was overridden with xgboost.spark.ignoreSsl=true.")
} else {
throw new Exception("xgboost-spark found spark.ssl.enabled=true to encrypt data " +
"in transit, but xgboost-spark sends non-encrypted data over the wire for efficiency. " +
"To override this protection and still use xgboost-spark at your own risk, " +
"you can set the SparkSession conf to use xgboost.spark.ignoreSsl=true.")
}
}
}
private def parameterFetchAndValidation(params: Map[String, Any], sparkContext: SparkContext) = {
val nWorkers = params("num_workers").asInstanceOf[Int]
val round = params("num_round").asInstanceOf[Int]
val useExternalMemory = params("use_external_memory").asInstanceOf[Boolean]
val obj = params.getOrElse("custom_obj", null).asInstanceOf[ObjectiveTrait]
val eval = params.getOrElse("custom_eval", null).asInstanceOf[EvalTrait]
val missing = params.getOrElse("missing", Float.NaN).asInstanceOf[Float]
validateSparkSslConf(sparkContext)
@throws(classOf[XGBoostError])
private[spark] def trainDistributed(
trainingData: RDD[XGBLabeledPoint],
params: Map[String, Any],
round: Int,
nWorkers: Int,
obj: ObjectiveTrait = null,
eval: EvalTrait = null,
useExternalMemory: Boolean = false,
missing: Float = Float.NaN): (Booster, Map[String, Array[Float]]) = {
if (params.contains("tree_method")) {
require(params("tree_method") == "hist" ||
params("tree_method") == "approx" ||
params("tree_method") == "auto", "xgboost4j-spark only supports tree_method as 'hist'," +
" 'approx' and 'auto'")
}
if (params.contains("train_test_ratio")) {
logger.warn("train_test_ratio is deprecated since XGBoost 0.82, we recommend to explicitly" +
" pass a training and multiple evaluation datasets by passing 'eval_sets' and " +
"'eval_set_names'")
require(params("tree_method") != "hist", "xgboost4j-spark does not support fast histogram" +
" for now")
}
require(nWorkers > 0, "you must specify more than 0 workers")
if (obj != null) {
require(params.get("objective_type").isDefined, "parameter \"objective_type\" is not" +
" defined, you have to specify the objective type as classification or regression" +
" with a customized objective function")
require(params.get("obj_type").isDefined, "parameter \"obj_type\" is not defined," +
" you have to specify the objective type as classification or regression with a" +
" customized objective function")
}
val trackerConf = params.get("tracker_conf") match {
case None => TrackerConf()
@@ -292,89 +210,12 @@ object XGBoost extends Serializable {
" an instance of Long.")
}
val (checkpointPath, checkpointInterval) = CheckpointManager.extractParams(params)
(nWorkers, round, useExternalMemory, obj, eval, missing, trackerConf, timeoutRequestWorkers,
checkpointPath, checkpointInterval)
}
private def trainForNonRanking(
trainingData: RDD[XGBLabeledPoint],
params: Map[String, Any],
rabitEnv: java.util.Map[String, String],
checkpointRound: Int,
prevBooster: Booster,
evalSetsMap: Map[String, RDD[XGBLabeledPoint]]): RDD[(Booster, Map[String, Array[Float]])] = {
val (nWorkers, _, useExternalMemory, obj, eval, missing, _, _, _, _) =
parameterFetchAndValidation(params, trainingData.sparkContext)
val partitionedData = repartitionForTraining(trainingData, nWorkers)
if (evalSetsMap.isEmpty) {
partitionedData.mapPartitions(labeledPoints => {
val watches = Watches.buildWatches(params,
removeMissingValues(labeledPoints, missing),
getCacheDirName(useExternalMemory))
buildDistributedBooster(watches, params, rabitEnv, checkpointRound,
obj, eval, prevBooster)
}).cache()
} else {
coPartitionNoGroupSets(partitionedData, evalSetsMap, nWorkers).mapPartitions {
nameAndLabeledPointSets =>
val watches = Watches.buildWatches(
nameAndLabeledPointSets.map {
case (name, iter) => (name, removeMissingValues(iter, missing))},
getCacheDirName(useExternalMemory))
buildDistributedBooster(watches, params, rabitEnv, checkpointRound,
obj, eval, prevBooster)
}.cache()
}
}
private def trainForRanking(
trainingData: RDD[XGBLabeledPoint],
params: Map[String, Any],
rabitEnv: java.util.Map[String, String],
checkpointRound: Int,
prevBooster: Booster,
evalSetsMap: Map[String, RDD[XGBLabeledPoint]]): RDD[(Booster, Map[String, Array[Float]])] = {
val (nWorkers, _, useExternalMemory, obj, eval, missing, _, _, _, _) =
parameterFetchAndValidation(params, trainingData.sparkContext)
val partitionedTrainingSet = repartitionForTrainingGroup(trainingData, nWorkers)
if (evalSetsMap.isEmpty) {
partitionedTrainingSet.mapPartitions(labeledPointGroups => {
val watches = Watches.buildWatchesWithGroup(params,
removeMissingValuesWithGroup(labeledPointGroups, missing),
getCacheDirName(useExternalMemory))
buildDistributedBooster(watches, params, rabitEnv, checkpointRound, obj, eval, prevBooster)
}).cache()
} else {
coPartitionGroupSets(partitionedTrainingSet, evalSetsMap, nWorkers).mapPartitions(
labeledPointGroupSets => {
val watches = Watches.buildWatchesWithGroup(
labeledPointGroupSets.map {
case (name, iter) => (name, removeMissingValuesWithGroup(iter, missing))
},
getCacheDirName(useExternalMemory))
buildDistributedBooster(watches, params, rabitEnv, checkpointRound, obj, eval,
prevBooster)
}).cache()
}
}
/**
* @return A tuple of the booster and the metrics used to build training summary
*/
@throws(classOf[XGBoostError])
private[spark] def trainDistributed(
trainingData: RDD[XGBLabeledPoint],
params: Map[String, Any],
hasGroup: Boolean = false,
evalSetsMap: Map[String, RDD[XGBLabeledPoint]] = Map()):
(Booster, Map[String, Array[Float]]) = {
logger.info(s"XGBoost training with parameters:\n${params.mkString("\n")}")
val (nWorkers, round, _, _, _, _, trackerConf, timeoutRequestWorkers,
checkpointPath, checkpointInterval) = parameterFetchAndValidation(params,
trainingData.sparkContext)
val sc = trainingData.sparkContext
val checkpointManager = new CheckpointManager(sc, checkpointPath)
checkpointManager.cleanUpHigherVersions(round.asInstanceOf[Int])
checkpointManager.cleanUpHigherVersions(round)
var prevBooster = checkpointManager.loadCheckpointAsBooster
// Train for every ${savingRound} rounds and save the partially completed booster
checkpointManager.getCheckpointRounds(checkpointInterval, round).map {
@@ -383,14 +224,9 @@ object XGBoost extends Serializable {
try {
val overriddenParams = overrideParamsAccordingToTaskCPUs(params, sc)
val parallelismTracker = new SparkParallelismTracker(sc, timeoutRequestWorkers, nWorkers)
val rabitEnv = tracker.getWorkerEnvs
val boostersAndMetrics = if (hasGroup) {
trainForRanking(trainingData, overriddenParams, rabitEnv, checkpointRound,
prevBooster, evalSetsMap)
} else {
trainForNonRanking(trainingData, overriddenParams, rabitEnv, checkpointRound,
prevBooster, evalSetsMap)
}
val boostersAndMetrics = buildDistributedBoosters(partitionedData, overriddenParams,
tracker.getWorkerEnvs, checkpointRound, obj, eval, useExternalMemory, missing,
prevBooster)
val sparkJobThread = new Thread() {
override def run() {
// force the job
@@ -408,12 +244,13 @@ object XGBoost extends Serializable {
checkpointManager.updateCheckpoint(prevBooster)
}
(booster, metrics)
} finally {
tracker.stop()
}
} finally {
tracker.stop()
}
}.last
}
private[spark] def repartitionForTraining(trainingData: RDD[XGBLabeledPoint], nWorkers: Int) = {
if (trainingData.getNumPartitions != nWorkers) {
logger.info(s"repartitioning training set to $nWorkers partitions")
@@ -423,68 +260,6 @@ object XGBoost extends Serializable {
}
}
private def aggByGroupInfo(trainingData: RDD[XGBLabeledPoint]) = {
val normalGroups: RDD[Array[XGBLabeledPoint]] = trainingData.mapPartitions(
// LabeledPointGroupIterator returns (Boolean, Array[XGBLabeledPoint])
new LabeledPointGroupIterator(_)).filter(!_.isEdgeGroup).map(_.points)
// edge groups with partition id.
val edgeGroups: RDD[(Int, XGBLabeledPointGroup)] = trainingData.mapPartitions(
new LabeledPointGroupIterator(_)).filter(_.isEdgeGroup).map(
group => (TaskContext.getPartitionId(), group))
// group chunks from different partitions together by group id in XGBLabeledPoint.
// use groupBy instead of aggregateBy since all groups within a partition have unique group ids.
val stitchedGroups: RDD[Array[XGBLabeledPoint]] = edgeGroups.groupBy(_._2.groupId).map(
groups => {
val it: Iterable[(Int, XGBLabeledPointGroup)] = groups._2
// sorted by partition id and merge list of Array[XGBLabeledPoint] into one array
it.toArray.sortBy(_._1).flatMap(_._2.points)
})
normalGroups.union(stitchedGroups)
}
private[spark] def repartitionForTrainingGroup(
trainingData: RDD[XGBLabeledPoint], nWorkers: Int): RDD[Array[XGBLabeledPoint]] = {
val allGroups = aggByGroupInfo(trainingData)
logger.info(s"repartitioning training group set to $nWorkers partitions")
allGroups.repartition(nWorkers)
}
private def coPartitionGroupSets(
aggedTrainingSet: RDD[Array[XGBLabeledPoint]],
evalSets: Map[String, RDD[XGBLabeledPoint]],
nWorkers: Int): RDD[(String, Iterator[Array[XGBLabeledPoint]])] = {
val repartitionedDatasets = Map("train" -> aggedTrainingSet) ++ evalSets.map {
case (name, rdd) => {
val aggedRdd = aggByGroupInfo(rdd)
if (aggedRdd.getNumPartitions != nWorkers) {
name -> aggedRdd.repartition(nWorkers)
} else {
name -> aggedRdd
}
}
}
repartitionedDatasets.foldLeft(aggedTrainingSet.sparkContext.parallelize(
Array.fill[(String, Iterator[Array[XGBLabeledPoint]])](nWorkers)(null), nWorkers)){
case (rddOfIterWrapper, (name, rddOfIter)) =>
rddOfIterWrapper.zipPartitions(rddOfIter){
(itrWrapper, itr) =>
if (!itr.hasNext) {
logger.error("when specifying eval sets as dataframes, you have to ensure that " +
"the number of elements in each dataframe is larger than the number of workers")
throw new Exception("too few elements in evaluation sets")
}
val itrArray = itrWrapper.toArray
if (itrArray.head != null) {
new IteratorWrapper(itrArray :+ (name -> itr))
} else {
new IteratorWrapper(Array(name -> itr))
}
}
}
}
private def postTrackerReturnProcessing(
trackerReturnVal: Int,
distributedBoostersAndMetrics: RDD[(Booster, Map[String, Array[Float]])],
@@ -493,9 +268,6 @@ object XGBoost extends Serializable {
// Copies of the final booster and the corresponding metrics
// reside in each partition of the `distributedBoostersAndMetrics`.
// Any of them can be used to create the model.
// it's safe to block here forever, as the tracker has returned successfully, and the Spark
// job should have finished, there is no reason for the thread cannot return
sparkJobThread.join()
val (booster, metrics) = distributedBoostersAndMetrics.first()
distributedBoostersAndMetrics.unpersist(false)
(booster, metrics)
@@ -515,13 +287,12 @@ object XGBoost extends Serializable {
}
private class Watches private(
val datasets: Array[DMatrix],
val names: Array[String],
val cacheDirName: Option[String]) {
val train: DMatrix,
val test: DMatrix,
private val cacheDirName: Option[String]) {
def toMap: Map[String, DMatrix] = {
names.zip(datasets).toMap.filter { case (_, matrix) => matrix.rowNum > 0 }
}
def toMap: Map[String, DMatrix] = Map("train" -> train, "test" -> test)
.filter { case (_, matrix) => matrix.rowNum > 0 }
def size: Int = toMap.size
@@ -537,239 +308,59 @@ private class Watches private(
private object Watches {
private def fromBaseMarginsToArray(baseMargins: Iterator[Float]): Option[Array[Float]] = {
val builder = new mutable.ArrayBuilder.ofFloat()
var nTotal = 0
var nUndefined = 0
while (baseMargins.hasNext) {
nTotal += 1
val baseMargin = baseMargins.next()
if (baseMargin.isNaN) {
nUndefined += 1 // don't waste space for all-NaNs.
def buildGroups(groups: Seq[Int]): Seq[Int] = {
val output = mutable.ArrayBuffer.empty[Int]
var count = 1
var lastGroup = groups.head
for (group <- groups.tail) {
if (group != lastGroup) {
lastGroup = group
output += count
count = 1
} else {
builder += baseMargin
count += 1
}
}
if (nUndefined == nTotal) {
None
} else if (nUndefined == 0) {
Some(builder.result())
} else {
throw new IllegalArgumentException(
s"Encountered a partition with $nUndefined NaN base margin values. " +
s"If you want to specify base margin, ensure all values are non-NaN.")
}
output += count
output
}
def buildWatches(
nameAndLabeledPointSets: Iterator[(String, Iterator[XGBLabeledPoint])],
cachedDirName: Option[String]): Watches = {
val dms = nameAndLabeledPointSets.map {
case (name, labeledPoints) =>
val baseMargins = new mutable.ArrayBuilder.ofFloat
val duplicatedItr = labeledPoints.map(labeledPoint => {
baseMargins += labeledPoint.baseMargin
labeledPoint
})
val dMatrix = new DMatrix(duplicatedItr, cachedDirName.map(_ + s"/$name").orNull)
val baseMargin = fromBaseMarginsToArray(baseMargins.result().iterator)
if (baseMargin.isDefined) {
dMatrix.setBaseMargin(baseMargin.get)
}
(name, dMatrix)
}.toArray
new Watches(dms.map(_._2), dms.map(_._1), cachedDirName)
}
def buildWatches(
def apply(
params: Map[String, Any],
labeledPoints: Iterator[XGBLabeledPoint],
baseMarginsOpt: Option[Array[Float]],
cacheDirName: Option[String]): Watches = {
val trainTestRatio = params.get("train_test_ratio").map(_.toString.toDouble).getOrElse(1.0)
val seed = params.get("seed").map(_.toString.toLong).getOrElse(System.nanoTime())
val r = new Random(seed)
val testPoints = mutable.ArrayBuffer.empty[XGBLabeledPoint]
val trainBaseMargins = new mutable.ArrayBuilder.ofFloat
val testBaseMargins = new mutable.ArrayBuilder.ofFloat
val trainPoints = labeledPoints.filter { labeledPoint =>
val accepted = r.nextDouble() <= trainTestRatio
if (!accepted) {
testPoints += labeledPoint
testBaseMargins += labeledPoint.baseMargin
} else {
trainBaseMargins += labeledPoint.baseMargin
}
accepted
}
val trainMatrix = new DMatrix(trainPoints, cacheDirName.map(_ + "/train").orNull)
val (trainIter1, trainIter2) = trainPoints.duplicate
val trainMatrix = new DMatrix(trainIter1, cacheDirName.map(_ + "/train").orNull)
val trainGroups = buildGroups(trainIter2.map(_.group).toSeq).toArray
trainMatrix.setGroup(trainGroups)
val testMatrix = new DMatrix(testPoints.iterator, cacheDirName.map(_ + "/test").orNull)
val trainMargin = fromBaseMarginsToArray(trainBaseMargins.result().iterator)
val testMargin = fromBaseMarginsToArray(testBaseMargins.result().iterator)
if (trainMargin.isDefined) trainMatrix.setBaseMargin(trainMargin.get)
if (testMargin.isDefined) testMatrix.setBaseMargin(testMargin.get)
new Watches(Array(trainMatrix, testMatrix), Array("train", "test"), cacheDirName)
}
def buildWatchesWithGroup(
nameAndlabeledPointGroupSets: Iterator[(String, Iterator[Array[XGBLabeledPoint]])],
cachedDirName: Option[String]): Watches = {
val dms = nameAndlabeledPointGroupSets.map {
case (name, labeledPointsGroups) =>
val baseMargins = new mutable.ArrayBuilder.ofFloat
val groupsInfo = new mutable.ArrayBuilder.ofInt
val weights = new mutable.ArrayBuilder.ofFloat
val iter = labeledPointsGroups.filter(labeledPointGroup => {
var groupWeight = -1.0f
var groupSize = 0
labeledPointGroup.map { labeledPoint => {
if (groupWeight < 0) {
groupWeight = labeledPoint.weight
} else if (groupWeight != labeledPoint.weight) {
throw new IllegalArgumentException("the instances in the same group have to be" +
s" assigned with the same weight (unexpected weight ${labeledPoint.weight}")
}
baseMargins += labeledPoint.baseMargin
groupSize += 1
labeledPoint
}
}
weights += groupWeight
groupsInfo += groupSize
true
})
val dMatrix = new DMatrix(iter.flatMap(_.iterator), cachedDirName.map(_ + s"/$name").orNull)
val baseMargin = fromBaseMarginsToArray(baseMargins.result().iterator)
if (baseMargin.isDefined) {
dMatrix.setBaseMargin(baseMargin.get)
}
dMatrix.setGroup(groupsInfo.result())
dMatrix.setWeight(weights.result())
(name, dMatrix)
}.toArray
new Watches(dms.map(_._2), dms.map(_._1), cachedDirName)
}
def buildWatchesWithGroup(
params: Map[String, Any],
labeledPointGroups: Iterator[Array[XGBLabeledPoint]],
cacheDirName: Option[String]): Watches = {
val trainTestRatio = params.get("train_test_ratio").map(_.toString.toDouble).getOrElse(1.0)
val seed = params.get("seed").map(_.toString.toLong).getOrElse(System.nanoTime())
val r = new Random(seed)
val testPoints = mutable.ArrayBuilder.make[XGBLabeledPoint]
val trainBaseMargins = new mutable.ArrayBuilder.ofFloat
val testBaseMargins = new mutable.ArrayBuilder.ofFloat
val trainGroups = new mutable.ArrayBuilder.ofInt
val testGroups = new mutable.ArrayBuilder.ofInt
val trainWeights = new mutable.ArrayBuilder.ofFloat
val testWeights = new mutable.ArrayBuilder.ofFloat
val trainLabelPointGroups = labeledPointGroups.filter { labeledPointGroup =>
val accepted = r.nextDouble() <= trainTestRatio
if (!accepted) {
var groupWeight = -1.0f
var groupSize = 0
labeledPointGroup.foreach(labeledPoint => {
testPoints += labeledPoint
testBaseMargins += labeledPoint.baseMargin
if (groupWeight < 0) {
groupWeight = labeledPoint.weight
} else if (labeledPoint.weight != groupWeight) {
throw new IllegalArgumentException("the instances in the same group have to be" +
s" assigned the same weight (unexpected weight ${labeledPoint.weight})")
}
groupSize += 1
})
testWeights += groupWeight
testGroups += groupSize
} else {
var groupWeight = -1.0f
var groupSize = 0
labeledPointGroup.foreach { labeledPoint => {
if (groupWeight < 0) {
groupWeight = labeledPoint.weight
} else if (labeledPoint.weight != groupWeight) {
throw new IllegalArgumentException("the instances in the same group have to be" +
s" assigned the same weight (unexpected weight ${labeledPoint.weight})")
}
trainBaseMargins += labeledPoint.baseMargin
groupSize += 1
}}
trainWeights += groupWeight
trainGroups += groupSize
}
accepted
}
val trainPoints = trainLabelPointGroups.flatMap(_.iterator)
val trainMatrix = new DMatrix(trainPoints, cacheDirName.map(_ + "/train").orNull)
trainMatrix.setGroup(trainGroups.result())
trainMatrix.setWeight(trainWeights.result())
val testMatrix = new DMatrix(testPoints.result().iterator, cacheDirName.map(_ + "/test").orNull)
if (trainTestRatio < 1.0) {
testMatrix.setGroup(testGroups.result())
testMatrix.setWeight(testWeights.result())
val testGroups = buildGroups(testPoints.map(_.group)).toArray
testMatrix.setGroup(testGroups)
}
val trainMargin = fromBaseMarginsToArray(trainBaseMargins.result().iterator)
val testMargin = fromBaseMarginsToArray(testBaseMargins.result().iterator)
if (trainMargin.isDefined) trainMatrix.setBaseMargin(trainMargin.get)
if (testMargin.isDefined) testMatrix.setBaseMargin(testMargin.get)
r.setSeed(seed)
for (baseMargins <- baseMarginsOpt) {
val (trainMargin, testMargin) = baseMargins.partition(_ => r.nextDouble() <= trainTestRatio)
trainMatrix.setBaseMargin(trainMargin)
testMatrix.setBaseMargin(testMargin)
}
new Watches(Array(trainMatrix, testMatrix), Array("train", "test"), cacheDirName)
new Watches(trainMatrix, testMatrix, cacheDirName)
}
}
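
One subtle trick in the removed margin handling above: r.setSeed(seed) rewinds the generator so baseMarginsOpt can be partitioned with the exact same accept/reject sequence that split the points. The idea in miniature:

import scala.util.Random

val seed = 42L
val ratio = 0.7
val r = new Random(seed)
val points = (1 to 10).toArray
val trainPoints = points.filter(_ => r.nextDouble() <= ratio)

r.setSeed(seed)  // replay the identical decision sequence
val margins = points.map(_ * 0.1f)
val (trainMargin, testMargin) = margins.partition(_ => r.nextDouble() <= ratio)
// trainMargin(i) corresponds to trainPoints(i); this only holds because both
// passes draw exactly one nextDouble per element, in the same order.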
/**
* Within each RDD partition, group the <code>XGBLabeledPoint</code>s by group id.
* The first and the last groups may be incomplete because of the data partitioning,
* so <code>LabeledPointGroupIterator</code> organizes data in a tuple format:
* (isFirstGroup || isLastGroup, Array[XGBLabeledPoint]).
* The edge groups across partitions can be stitched together later.
* @param base collection of <code>XGBLabeledPoint</code>
*/
private[spark] class LabeledPointGroupIterator(base: Iterator[XGBLabeledPoint])
extends AbstractIterator[XGBLabeledPointGroup] {
private var firstPointOfNextGroup: XGBLabeledPoint = null
private var isNewGroup = false
override def hasNext: Boolean = {
base.hasNext || isNewGroup
}
override def next(): XGBLabeledPointGroup = {
val builder = mutable.ArrayBuilder.make[XGBLabeledPoint]
var isFirstGroup = true
if (firstPointOfNextGroup != null) {
builder += firstPointOfNextGroup
isFirstGroup = false
}
isNewGroup = false
while (!isNewGroup && base.hasNext) {
val point = base.next()
val groupId = if (firstPointOfNextGroup != null) firstPointOfNextGroup.group else point.group
firstPointOfNextGroup = point
if (point.group == groupId) {
// add to current group
builder += point
} else {
// start a new group
isNewGroup = true
}
}
val isLastGroup = !isNewGroup
val result = builder.result()
val group = XGBLabeledPointGroup(result(0).group, result, isFirstGroup || isLastGroup)
group
}
}
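
A self-contained analogue of the iterator above, for reference: split a stream into runs of equal group ids and flag the first and last run of each partition, since those are the ones that may be incomplete and need stitching. (A BufferedIterator replaces the firstPointOfNextGroup lookahead; the behavior is the same.)

def groupRuns[T](base: Iterator[T])(key: T => Int): Iterator[(Boolean, Vector[T])] = {
  val buf = base.buffered
  var first = true
  new Iterator[(Boolean, Vector[T])] {
    def hasNext: Boolean = buf.hasNext
    def next(): (Boolean, Vector[T]) = {
      val g = key(buf.head)
      val run = Vector.newBuilder[T]
      while (buf.hasNext && key(buf.head) == g) run += buf.next()
      val isEdge = first || !buf.hasNext  // first or last run in this partition
      first = false
      (isEdge, run.result())
    }
  }
}
groupRuns(Iterator(1, 1, 2, 2, 2, 3))(identity).toList
// List((true,Vector(1, 1)), (false,Vector(2, 2, 2)), (true,Vector(3)))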


@@ -43,7 +43,7 @@ import org.apache.spark.broadcast.Broadcast
private[spark] trait XGBoostClassifierParams extends GeneralParams with LearningTaskParams
with BoosterParams with HasWeightCol with HasBaseMarginCol with HasNumClass with ParamMapFuncs
with HasLeafPredictionCol with HasContribPredictionCol with NonParamVariables
with HasLeafPredictionCol with HasContribPredictionCol
class XGBoostClassifier (
override val uid: String,
@@ -130,8 +130,6 @@ class XGBoostClassifier (
// setters for learning params
def setObjective(value: String): this.type = set(objective, value)
def setObjectiveType(value: String): this.type = set(objectiveType, value)
def setBaseScore(value: Double): this.type = set(baseScore, value)
def setEvalMetric(value: String): this.type = set(evalMetric, value)
@@ -140,9 +138,6 @@ class XGBoostClassifier (
def setNumEarlyStoppingRounds(value: Int): this.type = set(numEarlyStoppingRounds, value)
def setMaximizeEvaluationMetrics(value: Boolean): this.type =
set(maximizeEvaluationMetrics, value)
def setCustomObj(value: ObjectiveTrait): this.type = set(customObj, value)
def setCustomEval(value: EvalTrait): this.type = set(customEval, value)
@@ -165,10 +160,6 @@ class XGBoostClassifier (
set(evalMetric, setupDefaultEvalMetric())
}
if (isDefined(customObj) && $(customObj) != null) {
set(objectiveType, "classification")
}
val _numClasses = getNumClasses(dataset)
if (isDefined(numClass) && $(numClass) != _numClasses) {
throw new Exception("The number of classes in dataset doesn't match " +
@@ -182,19 +173,24 @@ class XGBoostClassifier (
col($(baseMarginCol))
}
val trainingSet: RDD[XGBLabeledPoint] = DataUtils.convertDataFrameToXGBLabeledPointRDDs(
col($(labelCol)), col($(featuresCol)), weight, baseMargin,
None, dataset.asInstanceOf[DataFrame]).head
val evalRDDMap = getEvalSets(xgboostParams).map {
case (name, dataFrame) => (name,
DataUtils.convertDataFrameToXGBLabeledPointRDDs(col($(labelCol)), col($(featuresCol)),
weight, baseMargin, None, dataFrame).head)
val instances: RDD[XGBLabeledPoint] = dataset.select(
col($(featuresCol)),
col($(labelCol)).cast(FloatType),
baseMargin.cast(FloatType),
weight.cast(FloatType)
).rdd.map { case Row(features: Vector, label: Float, baseMargin: Float, weight: Float) =>
val (indices, values) = features match {
case v: SparseVector => (v.indices, v.values.map(_.toFloat))
case v: DenseVector => (null, v.values.map(_.toFloat))
}
XGBLabeledPoint(label, indices, values, baseMargin = baseMargin, weight = weight)
}
transformSchema(dataset.schema, logging = true)
val derivedXGBParamMap = MLlib2XGBoostParams
// All non-null param maps in XGBoostClassifier are in derivedXGBParamMap.
val (_booster, _metrics) = XGBoost.trainDistributed(trainingSet, derivedXGBParamMap,
hasGroup = false, evalRDDMap)
val (_booster, _metrics) = XGBoost.trainDistributed(instances, derivedXGBParamMap,
$(numRound), $(numWorkers), $(customObj), $(customEval), $(useExternalMemory),
$(missing))
val model = new XGBoostClassificationModel(uid, _numClasses, _booster)
val summary = XGBoostTrainingSummary(_metrics)
model.setSummary(summary)
@@ -255,11 +251,11 @@ class XGBoostClassificationModel private[ml](
override def predict(features: Vector): Double = {
import DataUtils._
val dm = new DMatrix(XGBoost.removeMissingValues(Iterator(features.asXGB), $(missing)))
val probability = _booster.predict(data = dm)(0).map(_.toDouble)
val probability = _booster.predict(data = dm)(0)
if (numClasses == 2) {
math.round(probability(0))
} else {
probability2prediction(Vectors.dense(probability))
Vectors.dense(probability.map(_.toDouble)).argmax
}
}
@@ -285,12 +281,12 @@ class XGBoostClassificationModel private[ml](
val bBooster = dataset.sparkSession.sparkContext.broadcast(_booster)
val appName = dataset.sparkSession.sparkContext.appName
val inputRDD = dataset.asInstanceOf[Dataset[Row]].rdd
val predictionRDD = dataset.asInstanceOf[Dataset[Row]].rdd.mapPartitions { rowIterator =>
val rdd = dataset.asInstanceOf[Dataset[Row]].rdd.mapPartitions { rowIterator =>
if (rowIterator.hasNext) {
val rabitEnv = Array("DMLC_TASK_ID" -> TaskContext.getPartitionId().toString).toMap
Rabit.init(rabitEnv.asJava)
val featuresIterator = rowIterator.map(row => row.getAs[Vector](
val (rowItr1, rowItr2) = rowIterator.duplicate
val featuresIterator = rowItr2.map(row => row.getAs[Vector](
$(featuresCol))).toList.iterator
import DataUtils._
val cacheInfo = {
@@ -307,27 +303,19 @@ class XGBoostClassificationModel private[ml](
val Array(rawPredictionItr, probabilityItr, predLeafItr, predContribItr) =
producePredictionItrs(bBooster, dm)
Rabit.shutdown()
Iterator(rawPredictionItr, probabilityItr, predLeafItr,
produceResultIterator(rowItr1, rawPredictionItr, probabilityItr, predLeafItr,
predContribItr)
} finally {
dm.delete()
}
} else {
Iterator()
Iterator[Row]()
}
}
val resultRDD = inputRDD.zipPartitions(predictionRDD, preservesPartitioning = true) {
case (inputIterator, predictionItr) =>
if (inputIterator.hasNext) {
produceResultIterator(inputIterator, predictionItr.next(), predictionItr.next(),
predictionItr.next(), predictionItr.next())
} else {
Iterator()
}
}
bBooster.unpersist(blocking = false)
dataset.sparkSession.createDataFrame(resultRDD, generateResultSchema(schema))
dataset.sparkSession.createDataFrame(rdd, generateResultSchema(schema))
}
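
The rewrite above drops the zipPartitions pass in favour of rowIterator.duplicate, pairing each input row with its prediction inside a single mapPartitions. The pattern in miniature; note that duplicate buffers whatever one copy reads ahead of the other, and the code above materializes the features side with .toList, so the whole partition is held in memory much as the old two-RDD version was:

val (rowItr1, rowItr2) = Iterator("a", "b", "c").duplicate
val predictions = rowItr2.map(_.toUpperCase)  // stand-in for the booster output
rowItr1.zip(predictions).toList               // List((a,A), (b,B), (c,C))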
private def produceResultIterator(
@@ -419,21 +407,24 @@ class XGBoostClassificationModel private[ml](
var numColsOutput = 0
val rawPredictionUDF = udf { rawPrediction: mutable.WrappedArray[Float] =>
val raw = rawPrediction.map(_.toDouble).toArray
val rawPredictions = if (numClasses == 2) Array(-raw(0), raw(0)) else raw
Vectors.dense(rawPredictions)
Vectors.dense(rawPrediction.map(_.toDouble).toArray)
}
val probabilityUDF = udf { probability: mutable.WrappedArray[Float] =>
val prob = probability.map(_.toDouble).toArray
val probabilities = if (numClasses == 2) Array(1.0 - prob(0), prob(0)) else prob
Vectors.dense(probabilities)
if (numClasses == 2) {
Vectors.dense(Array(1 - probability(0), probability(0)).map(_.toDouble))
} else {
Vectors.dense(probability.map(_.toDouble).toArray)
}
}
val predictUDF = udf { probability: mutable.WrappedArray[Float] =>
// From XGBoost probability to MLlib prediction
val prob = probability.map(_.toDouble).toArray
val probabilities = if (numClasses == 2) Array(1.0 - prob(0), prob(0)) else prob
val probabilities = if (numClasses == 2) {
Array(1 - probability(0), probability(0)).map(_.toDouble)
} else {
probability.map(_.toDouble).toArray
}
probability2prediction(Vectors.dense(probabilities))
}
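
All three UDFs above encode one convention, shown here in isolation (a sketch, not the model's API): XGBoost's binary objectives emit a single score p = P(class 1), which Spark ML expands to [1 - p, p] before the argmax:

def toProbabilities(raw: Array[Float], numClasses: Int): Array[Double] =
  if (numClasses == 2) Array(1.0 - raw(0), raw(0).toDouble)
  else raw.map(_.toDouble)

def predictClass(raw: Array[Float], numClasses: Int): Int = {
  val probs = toProbabilities(raw, numClasses)
  probs.indexOf(probs.max)  // the argmax that probability2prediction performs
}
predictClass(Array(0.8f), numClasses = 2)               // 1
predictClass(Array(0.2f, 0.5f, 0.3f), numClasses = 3)   // 1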
@@ -525,4 +516,3 @@ object XGBoostClassificationModel extends MLReadable[XGBoostClassificationModel]
}
}
}


@@ -43,7 +43,7 @@ import org.apache.spark.broadcast.Broadcast
private[spark] trait XGBoostRegressorParams extends GeneralParams with BoosterParams
with LearningTaskParams with HasBaseMarginCol with HasWeightCol with HasGroupCol
with ParamMapFuncs with HasLeafPredictionCol with HasContribPredictionCol with NonParamVariables
with ParamMapFuncs with HasLeafPredictionCol with HasContribPredictionCol
class XGBoostRegressor (
override val uid: String,
@@ -130,8 +130,6 @@ class XGBoostRegressor (
// setters for learning params
def setObjective(value: String): this.type = set(objective, value)
def setObjectiveType(value: String): this.type = set(objectiveType, value)
def setBaseScore(value: Double): this.type = set(baseScore, value)
def setEvalMetric(value: String): this.type = set(evalMetric, value)
@@ -140,9 +138,6 @@ class XGBoostRegressor (
def setNumEarlyStoppingRounds(value: Int): this.type = set(numEarlyStoppingRounds, value)
def setMaximizeEvaluationMetrics(value: Boolean): this.type =
set(maximizeEvaluationMetrics, value)
def setCustomObj(value: ObjectiveTrait): this.type = set(customObj, value)
def setCustomEval(value: EvalTrait): this.type = set(customEval, value)
@@ -163,10 +158,6 @@ class XGBoostRegressor (
set(evalMetric, setupDefaultEvalMetric())
}
if (isDefined(customObj) && $(customObj) != null) {
set(objectiveType, "regression")
}
val weight = if (!isDefined(weightCol) || $(weightCol).isEmpty) lit(1.0) else col($(weightCol))
val baseMargin = if (!isDefined(baseMarginCol) || $(baseMarginCol).isEmpty) {
lit(Float.NaN)
@@ -174,19 +165,27 @@ class XGBoostRegressor (
col($(baseMarginCol))
}
val group = if (!isDefined(groupCol) || $(groupCol).isEmpty) lit(-1) else col($(groupCol))
val trainingSet: RDD[XGBLabeledPoint] = DataUtils.convertDataFrameToXGBLabeledPointRDDs(
col($(labelCol)), col($(featuresCol)), weight, baseMargin, Some(group),
dataset.asInstanceOf[DataFrame]).head
val evalRDDMap = getEvalSets(xgboostParams).map {
case (name, dataFrame) => (name,
DataUtils.convertDataFrameToXGBLabeledPointRDDs(col($(labelCol)), col($(featuresCol)),
weight, baseMargin, Some(group), dataFrame).head)
val instances: RDD[XGBLabeledPoint] = dataset.select(
col($(labelCol)).cast(FloatType),
col($(featuresCol)),
weight.cast(FloatType),
group.cast(IntegerType),
baseMargin.cast(FloatType)
).rdd.map {
case Row(label: Float, features: Vector, weight: Float, group: Int, baseMargin: Float) =>
val (indices, values) = features match {
case v: SparseVector => (v.indices, v.values.map(_.toFloat))
case v: DenseVector => (null, v.values.map(_.toFloat))
}
XGBLabeledPoint(label, indices, values, weight, group, baseMargin)
}
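
The vector unpacking above, isolated: a SparseVector keeps its indices so XGBoost can skip implicit zeros, while a DenseVector passes null indices and sends every slot through. A sketch assuming Spark MLlib on the classpath, with a stand-in case class for XGBLabeledPoint:

import org.apache.spark.ml.linalg.{DenseVector, SparseVector, Vector, Vectors}

final case class Point(label: Float, indices: Array[Int], values: Array[Float])

def toPoint(label: Float, features: Vector): Point = features match {
  case v: SparseVector => Point(label, v.indices, v.values.map(_.toFloat))
  case v: DenseVector  => Point(label, null, v.values.map(_.toFloat))
}
toPoint(1.0f, Vectors.sparse(4, Array(1, 3), Array(0.5, 2.0)))
// Point(1.0, Array(1, 3), Array(0.5, 2.0))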
transformSchema(dataset.schema, logging = true)
val derivedXGBParamMap = MLlib2XGBoostParams
// All non-null param maps in XGBoostRegressor are in derivedXGBParamMap.
val (_booster, _metrics) = XGBoost.trainDistributed(trainingSet, derivedXGBParamMap,
hasGroup = group != lit(-1), evalRDDMap)
val (_booster, _metrics) = XGBoost.trainDistributed(instances, derivedXGBParamMap,
$(numRound), $(numWorkers), $(customObj), $(customEval), $(useExternalMemory),
$(missing))
val model = new XGBoostRegressionModel(uid, _booster)
val summary = XGBoostTrainingSummary(_metrics)
model.setSummary(summary)
@@ -257,12 +256,13 @@ class XGBoostRegressionModel private[ml] (
val bBooster = dataset.sparkSession.sparkContext.broadcast(_booster)
val appName = dataset.sparkSession.sparkContext.appName
val inputRDD = dataset.asInstanceOf[Dataset[Row]].rdd
val predictionRDD = dataset.asInstanceOf[Dataset[Row]].rdd.mapPartitions { rowIterator =>
val rdd = dataset.asInstanceOf[Dataset[Row]].rdd.mapPartitions { rowIterator =>
if (rowIterator.hasNext) {
val rabitEnv = Array("DMLC_TASK_ID" -> TaskContext.getPartitionId().toString).toMap
Rabit.init(rabitEnv.asJava)
val featuresIterator = rowIterator.map(row => row.getAs[Vector](
val (rowItr1, rowItr2) = rowIterator.duplicate
val featuresIterator = rowItr2.map(row => row.getAs[Vector](
$(featuresCol))).toList.iterator
import DataUtils._
val cacheInfo = {
@@ -272,6 +272,7 @@ class XGBoostRegressionModel private[ml] (
null
}
}
val dm = new DMatrix(
XGBoost.removeMissingValues(featuresIterator.map(_.asXGB), $(missing)),
cacheInfo)
@@ -279,25 +280,16 @@ class XGBoostRegressionModel private[ml] (
val Array(originalPredictionItr, predLeafItr, predContribItr) =
producePredictionItrs(bBooster, dm)
Rabit.shutdown()
Iterator(originalPredictionItr, predLeafItr, predContribItr)
produceResultIterator(rowItr1, originalPredictionItr, predLeafItr, predContribItr)
} finally {
dm.delete()
}
} else {
Iterator()
Iterator[Row]()
}
}
val resultRDD = inputRDD.zipPartitions(predictionRDD, preservesPartitioning = true) {
case (inputIterator, predictionItr) =>
if (inputIterator.hasNext) {
produceResultIterator(inputIterator, predictionItr.next(), predictionItr.next(),
predictionItr.next())
} else {
Iterator()
}
}
bBooster.unpersist(blocking = false)
dataset.sparkSession.createDataFrame(resultRDD, generateResultSchema(schema))
dataset.sparkSession.createDataFrame(rdd, generateResultSchema(schema))
}
private def produceResultIterator(


@@ -18,17 +18,12 @@ package ml.dmlc.xgboost4j.scala.spark
class XGBoostTrainingSummary private(
val trainObjectiveHistory: Array[Float],
val validationObjectiveHistory: (String, Array[Float])*) extends Serializable {
val testObjectiveHistory: Option[Array[Float]]
) extends Serializable {
override def toString: String = {
val train = trainObjectiveHistory.mkString(",")
val validationObjectiveHistoryString = {
validationObjectiveHistory.map {
case (name, metrics) =>
s"${name}ObjectiveHistory=${metrics.mkString(",")}"
}.mkString(";")
}
s"XGBoostTrainingSummary(trainObjectiveHistory=$train; $vaidationObjectiveHistoryString)"
val train = trainObjectiveHistory.toList
val test = testObjectiveHistory.map(_.toList)
s"XGBoostTrainingSummary(trainObjectiveHistory=$train, testObjectiveHistory=$test)"
}
}
@@ -36,6 +31,6 @@ private[xgboost4j] object XGBoostTrainingSummary {
def apply(metrics: Map[String, Array[Float]]): XGBoostTrainingSummary = {
new XGBoostTrainingSummary(
trainObjectiveHistory = metrics("train"),
metrics.filter(_._1 != "train").toSeq: _*)
testObjectiveHistory = metrics.get("test"))
}
}
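
Usage sketch for the simplified summary: per the apply above, the "train" entry is mandatory, "test" is optional, and any other eval-set names are now dropped:

val summary = XGBoostTrainingSummary(Map(
  "train" -> Array(0.9f, 0.7f, 0.5f),
  "test"  -> Array(0.95f, 0.8f, 0.6f)))
println(summary)
// XGBoostTrainingSummary(trainObjectiveHistory=List(0.9, 0.7, 0.5), testObjectiveHistory=Some(List(0.95, 0.8, 0.6)))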


@@ -50,21 +50,10 @@ private[spark] trait BoosterParams extends Params {
* overfitting. [default=6] range: [1, Int.MaxValue]
*/
final val maxDepth = new IntParam(this, "maxDepth", "maximum depth of a tree; increasing " +
"this value makes the model more complex and more likely to overfit.", (value: Int) => value >= 0)
"this value makes the model more complex and more likely to overfit.", (value: Int) => value >= 1)
final def getMaxDepth: Int = $(maxDepth)
/**
* Maximum number of nodes to be added. Only relevant when grow_policy=lossguide is set.
*/
final val maxLeaves = new IntParam(this, "maxLeaves",
"Maximum number of nodes to be added. Only relevant when grow_policy=lossguide is set.",
(value: Int) => value >= 0)
final def getMaxLeaves: Int = $(maxLeaves)
/**
* minimum sum of instance weight (hessian) needed in a child. If the tree partition step results
* in a leaf node with the sum of instance weight less than min_child_weight, then the building
@@ -158,9 +147,7 @@ private[spark] trait BoosterParams extends Params {
* growth policy for fast histogram algorithm
*/
final val growPolicy = new Param[String](this, "growPolicy",
"Controls a way new nodes are added to the tree. Currently supported only if" +
" tree_method is set to hist. Choices: depthwise, lossguide. depthwise: split at nodes" +
" closest to the root. lossguide: split at nodes with highest loss change.",
"growth policy for fast histogram algorithm",
(value: String) => BoosterParams.supportedGrowthPolicies.contains(value))
final def getGrowPolicy: String = $(growPolicy)
@@ -253,23 +240,7 @@ private[spark] trait BoosterParams extends Params {
final val treeLimit = new IntParam(this, name = "treeLimit",
doc = "number of trees used in the prediction; defaults to 0 (use all trees).")
final def getTreeLimit: Int = $(treeLimit)
final val monotoneConstraints = new Param[String](this, name = "monotoneConstraints",
doc = "a list in length of number of features, 1 indicate monotonic increasing, - 1 means " +
"decreasing, 0 means no constraint. If it is shorter than number of features, 0 will be " +
"padded ")
final def getMonotoneConstraints: String = $(monotoneConstraints)
final val interactionConstraints = new Param[String](this,
name = "interactionConstraints",
doc = "Constraints for interaction representing permitted interactions. The constraints" +
" must be specified in the form of a nest list, e.g. [[0, 1], [2, 3, 4]]," +
" where each inner list is a group of indices of features that are allowed to interact" +
" with each other. See tutorial for more information")
final def getInteractionConstraints: String = $(interactionConstraints)
final def getTreeLimit: Double = $(treeLimit)
setDefault(eta -> 0.3, gamma -> 0, maxDepth -> 6,
minChildWeight -> 1, maxDeltaStep -> 0,


@@ -22,7 +22,26 @@ import org.json4s.{DefaultFormats, Extraction, NoTypeHints}
import org.json4s.jackson.JsonMethods.{compact, parse, render}
import org.apache.spark.ml.param.{Param, ParamPair, Params}
import org.apache.spark.sql.DataFrame
class GroupDataParam(
parent: Params,
name: String,
doc: String) extends Param[Seq[Seq[Int]]](parent, name, doc) {
/** Creates a param pair with the given value (for Java). */
override def w(value: Seq[Seq[Int]]): ParamPair[Seq[Seq[Int]]] = super.w(value)
override def jsonEncode(value: Seq[Seq[Int]]): String = {
import org.json4s.jackson.Serialization
implicit val formats = Serialization.formats(NoTypeHints)
compact(render(Extraction.decompose(value)))
}
override def jsonDecode(json: String): Seq[Seq[Int]] = {
implicit val formats = DefaultFormats
parse(json).extract[Seq[Seq[Int]]]
}
}
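
A round-trip sketch for the codec above, grounded in the jsonEncode/jsonDecode bodies (json4s serialization without type hints). The `owner` value is a placeholder for whichever Params instance holds the param:

val groupData = new GroupDataParam(owner, "groupData", "per-partition group sizes")
val json = groupData.jsonEncode(Seq(Seq(3, 2), Seq(4)))  // "[[3,2],[4]]"
val back = groupData.jsonDecode(json)                    // List(List(3, 2), List(4))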
class CustomEvalParam(
parent: Params,
@@ -81,6 +100,7 @@ class TrackerConfParam(
override def jsonDecode(json: String): TrackerConf = {
implicit val formats = DefaultFormats
val parsedValue = parse(json)
println(parsedValue.children)
parsedValue.extract[TrackerConf]
}
}

Some files were not shown because too many files have changed in this diff.