Compare commits


5 Commits

Author             SHA1        Message                                   Date
Tong He            1995db85e8  fix additional files note (#4699)         2019-07-25 11:21:48 -07:00
                               * fix additional files note
                               * Trigger CI
                               * Trigger CI
Philip Hyunsu Cho  9c02016844  Upgrade dmlc-core submodule (#4688)       2019-07-20 11:31:04 -07:00
Philip Hyunsu Cho  00e58bd08b  Upgrade dmlc-core submodule (#4674)       2019-07-18 11:58:54 -07:00
Tong He            b77a89ec28  [R] Fix CRAN error for Mac OS X (#4672)   2019-07-18 11:58:30 -07:00
                               * fix cran error for mac os x
                               * ignore float on windows check for now
Philip Cho         cafc8bff58  Fix version number in R package           2019-06-20 14:23:20 -07:00
478 changed files with 14547 additions and 34876 deletions

(The +/- diff markers are not shown below. Where a changed line appears in two versions back to back, the first appears to be the newer master side and the second the 0.90 release side, judging by the version strings in CMakeLists.txt and R-package/DESCRIPTION.)


@@ -1,4 +1,4 @@
Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
CheckOptions:
- { key: readability-identifier-naming.ClassCase, value: CamelCase }
- { key: readability-identifier-naming.StructCase, value: CamelCase }

.gitignore (13 changed lines)

@@ -17,7 +17,7 @@
*.tar.gz
*conf
*buffer
*.model
*model
*pyc
*.train
*.test
@@ -69,8 +69,10 @@ config.mk
/xgboost
*.data
build_plugin
.idea
recommonmark/
tags
*.iml
*.class
target
*.swp
@@ -88,16 +90,9 @@ lib/
# spark
metastore_db
plugin/updater_gpu/test/cpp/data
/include/xgboost/build_config.h
# files from R-package source install
**/config.status
R-package/src/Makevars
# Visual Studio Code
/.vscode/
# IntelliJ/CLion
.idea
*.iml
/cmake-build-debug/


@@ -1,51 +1,36 @@
# disable sudo for container build.
sudo: required
# Enabling test OS X
# Enabling test on Linux and OS X
os:
- linux
- osx
osx_image: xcode10.3
dist: bionic
osx_image: xcode9.3
# Use Build Matrix to do lint and build seperately
env:
matrix:
# python package test
- TASK=python_test
# test installation of Python source distribution
- TASK=python_sdist_test
# java package test
- TASK=java_test
# cmake test
- TASK=cmake_test
# - TASK=cmake_test
matrix:
exclude:
- os: linux
env: TASK=python_test
- os: linux
env: TASK=java_test
- os: linux
env: TASK=cmake_test
# dependent brew packages
# dependent apt packages
addons:
homebrew:
packages:
- cmake
- libomp
- gcc@7
- graphviz
- openssl
- libgit2
- wget
- r
update: true
before_install:
- source dmlc-core/scripts/travis/travis_setup_env.sh
- if [ "${TASK}" != "python_sdist_test" ]; then export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package; fi
- export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package
- echo "MAVEN_OPTS='-Xmx2g -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m -Dorg.slf4j.simpleLogger.defaultLogLevel=error'" > ~/.mavenrc
install:


@@ -1,23 +1,14 @@
cmake_minimum_required(VERSION 3.12)
project(xgboost LANGUAGES CXX C VERSION 1.0.0)
cmake_minimum_required(VERSION 3.3)
project(xgboost LANGUAGES CXX C VERSION 0.90)
include(cmake/Utils.cmake)
list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/modules")
cmake_policy(SET CMP0022 NEW)
if ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
cmake_policy(SET CMP0077 NEW)
endif ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
message(STATUS "CMake version ${CMAKE_VERSION}")
if (MSVC)
cmake_minimum_required(VERSION 3.11)
endif (MSVC)
if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
message(FATAL_ERROR "GCC version must be at least 5.0!")
endif()
include(${xgboost_SOURCE_DIR}/cmake/FindPrefetchIntrinsics.cmake)
find_prefetch_intrinsics()
include(${xgboost_SOURCE_DIR}/cmake/Version.cmake)
write_version()
set_default_configuration_release()
#-- Options
@@ -27,61 +18,44 @@ option(USE_OPENMP "Build with OpenMP support." ON)
option(JVM_BINDINGS "Build JVM bindings" OFF)
option(R_LIB "Build shared library for R package" OFF)
## Dev
option(USE_DEBUG_OUTPUT "Dump internal training results like gradients and predictions to stdout.
Should only be used for debugging." OFF)
option(GOOGLE_TEST "Build google tests" OFF)
option(USE_DMLC_GTEST "Use google tests bundled with dmlc-core submodule" OFF)
option(USE_DMLC_GTEST "Use google tests bundled with dmlc-core submodule (EXPERIMENTAL)" OFF)
option(USE_NVTX "Build with cuda profiling annotations. Developers only." OFF)
set(NVTX_HEADER_DIR "" CACHE PATH "Path to the stand-alone nvtx header")
option(RABIT_MOCK "Build rabit with mock" OFF)
## CUDA
option(USE_CUDA "Build with GPU acceleration" OFF)
option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
option(USE_NCCL "Build with NCCL to enable multi-GPU support." OFF)
option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF)
set(GPU_COMPUTE_VER "" CACHE STRING
"Semicolon separated list of compute versions to be built against, e.g. '35;61'")
## Copied From dmlc
option(USE_HDFS "Build with HDFS support" OFF)
option(USE_AZURE "Build with AZURE support" OFF)
option(USE_S3 "Build with S3 support" OFF)
if (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable BUILD_WITH_SHARED_NCCL.")
endif (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
## Sanitizers
option(USE_SANITIZER "Use santizer flags" OFF)
option(SANITIZER_PATH "Path to sanitizes.")
set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
"Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
address, leak, undefined and thread.")
address, leak and thread.")
## Plugins
option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)
#-- Checks for building XGBoost
if (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
message(SEND_ERROR "Do not enable `USE_DEBUG_OUTPUT' with release build.")
endif (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
if (USE_NCCL AND NOT (USE_CUDA))
message(SEND_ERROR "`USE_NCCL` must be enabled with `USE_CUDA` flag.")
endif (USE_NCCL AND NOT (USE_CUDA))
if (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable BUILD_WITH_SHARED_NCCL.")
endif (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
if (JVM_BINDINGS AND R_LIB)
message(SEND_ERROR "`R_LIB' is not compatible with `JVM_BINDINGS' as they both have customized configurations.")
endif (JVM_BINDINGS AND R_LIB)
if (R_LIB AND GOOGLE_TEST)
message(WARNING "Some C++ unittests will fail with `R_LIB` enabled,
as R package redirects some functions to R runtime implementation.")
endif (R_LIB AND GOOGLE_TEST)
## Deprecation warning
if (USE_AVX)
message(SEND_ERROR "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from XGBoost.")
message(WARNING "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from xgboost.")
endif (USE_AVX)
#-- Sanitizer
# Sanitizer
if (USE_SANITIZER)
# Older CMake versions have had troubles with Sanitizer
cmake_minimum_required(VERSION 3.12)
include(cmake/Sanitizer.cmake)
enable_sanitizers("${ENABLED_SANITIZERS}")
endif (USE_SANITIZER)
if (USE_CUDA)
cmake_minimum_required(VERSION 3.12)
SET(USE_OPENMP ON CACHE BOOL "CUDA requires OpenMP" FORCE)
# `export CXX=' is ignored by CMake CUDA.
set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER})
@@ -93,18 +67,9 @@ if (USE_CUDA)
message(STATUS "CUDA GEN_CODE: ${GEN_CODE}")
endif (USE_CUDA)
if (USE_OPENMP)
if (APPLE)
# Require CMake 3.16+ on Mac OSX, as previous versions of CMake had trouble locating
# OpenMP on Mac. See https://github.com/dmlc/xgboost/pull/5146#issuecomment-568312706
cmake_minimum_required(VERSION 3.16)
endif (APPLE)
find_package(OpenMP REQUIRED)
endif (USE_OPENMP)
# dmlc-core
msvc_use_static_runtime()
add_subdirectory(${xgboost_SOURCE_DIR}/dmlc-core)
add_subdirectory(${PROJECT_SOURCE_DIR}/dmlc-core)
set_target_properties(dmlc PROPERTIES
CXX_STANDARD 11
CXX_STANDARD_REQUIRED ON
@@ -112,29 +77,40 @@ set_target_properties(dmlc PROPERTIES
list(APPEND LINKED_LIBRARIES_PRIVATE dmlc)
# rabit
set(RABIT_BUILD_DMLC OFF)
set(DMLC_ROOT ${xgboost_SOURCE_DIR}/dmlc-core)
set(RABIT_WITH_R_LIB ${R_LIB})
add_subdirectory(rabit)
if (RABIT_MOCK)
list(APPEND LINKED_LIBRARIES_PRIVATE rabit_mock_static)
else()
list(APPEND LINKED_LIBRARIES_PRIVATE rabit)
endif(RABIT_MOCK)
# full rabit doesn't build on windows, so we can't import it as subdirectory
if(MINGW OR R_LIB)
set(RABIT_SOURCES
rabit/src/engine_empty.cc
rabit/src/c_api.cc)
else ()
set(RABIT_SOURCES
rabit/src/allreduce_base.cc
rabit/src/allreduce_robust.cc
rabit/src/engine.cc
rabit/src/c_api.cc)
endif (MINGW OR R_LIB)
add_library(rabit STATIC ${RABIT_SOURCES})
target_include_directories(rabit PRIVATE
$<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/dmlc-core/include>
$<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/rabit/include/rabit>)
set_target_properties(rabit
PROPERTIES
CXX_STANDARD 11
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
list(APPEND LINKED_LIBRARIES_PRIVATE rabit)
# Exports some R specific definitions and objects
if (R_LIB)
add_subdirectory(${xgboost_SOURCE_DIR}/R-package)
add_subdirectory(${PROJECT_SOURCE_DIR}/R-package)
endif (R_LIB)
# core xgboost
add_subdirectory(${xgboost_SOURCE_DIR}/plugin)
add_subdirectory(${xgboost_SOURCE_DIR}/src)
add_subdirectory(${PROJECT_SOURCE_DIR}/src)
set(XGBOOST_OBJ_SOURCES "${XGBOOST_OBJ_SOURCES};$<TARGET_OBJECTS:objxgboost>")
#-- Shared library
add_library(xgboost SHARED ${XGBOOST_OBJ_SOURCES})
add_library(xgboost SHARED ${XGBOOST_OBJ_SOURCES} ${PLUGINS_SOURCES})
target_include_directories(xgboost
INTERFACE
$<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include>
@@ -143,18 +119,22 @@ target_link_libraries(xgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})
# This creates its own shared library `xgboost4j'.
if (JVM_BINDINGS)
add_subdirectory(${xgboost_SOURCE_DIR}/jvm-packages)
add_subdirectory(${PROJECT_SOURCE_DIR}/jvm-packages)
endif (JVM_BINDINGS)
#-- End shared library
#-- CLI for xgboost
add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc ${XGBOOST_OBJ_SOURCES})
add_executable(runxgboost ${PROJECT_SOURCE_DIR}/src/cli_main.cc ${XGBOOST_OBJ_SOURCES})
# For cli_main.cc only
if (USE_OPENMP)
find_package(OpenMP REQUIRED)
target_compile_options(runxgboost PRIVATE ${OpenMP_CXX_FLAGS})
endif (USE_OPENMP)
target_include_directories(runxgboost
PRIVATE
${xgboost_SOURCE_DIR}/include
${xgboost_SOURCE_DIR}/dmlc-core/include
${xgboost_SOURCE_DIR}/rabit/include)
${PROJECT_SOURCE_DIR}/include
${PROJECT_SOURCE_DIR}/dmlc-core/include
${PROJECT_SOURCE_DIR}/rabit/include)
target_link_libraries(runxgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})
set_target_properties(
runxgboost PROPERTIES
@@ -163,8 +143,8 @@ set_target_properties(
CXX_STANDARD_REQUIRED ON)
#-- End CLI for xgboost
set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib)
set_output_directory(runxgboost ${PROJECT_SOURCE_DIR})
set_output_directory(xgboost ${PROJECT_SOURCE_DIR}/lib)
# Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
add_dependencies(xgboost runxgboost)
@@ -187,9 +167,11 @@ if (BUILD_C_DOC)
endif (BUILD_C_DOC)
include(GNUInstallDirs)
# Install all headers. Please note that currently the C++ headers does not form an "API".
install(DIRECTORY ${xgboost_SOURCE_DIR}/include/xgboost
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
# Exposing only C APIs.
install(FILES
"${PROJECT_SOURCE_DIR}/include/xgboost/c_api.h"
DESTINATION
include/xgboost/)
install(TARGETS xgboost runxgboost
EXPORT XGBoostTargets
@@ -221,21 +203,21 @@ install(
if (GOOGLE_TEST)
enable_testing()
# Unittests.
add_subdirectory(${xgboost_SOURCE_DIR}/tests/cpp)
add_subdirectory(${PROJECT_SOURCE_DIR}/tests/cpp)
add_test(
NAME TestXGBoostLib
COMMAND testxgboost
WORKING_DIRECTORY ${xgboost_BINARY_DIR})
WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
# CLI tests
configure_file(
${xgboost_SOURCE_DIR}/tests/cli/machine.conf.in
${xgboost_BINARY_DIR}/tests/cli/machine.conf
${PROJECT_SOURCE_DIR}/tests/cli/machine.conf.in
${PROJECT_BINARY_DIR}/tests/cli/machine.conf
@ONLY)
add_test(
NAME TestXGBoostCLI
COMMAND runxgboost ${xgboost_BINARY_DIR}/tests/cli/machine.conf
WORKING_DIRECTORY ${xgboost_BINARY_DIR})
COMMAND runxgboost ${PROJECT_BINARY_DIR}/tests/cli/machine.conf
WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
set_tests_properties(TestXGBoostCLI
PROPERTIES
PASS_REGULAR_EXPRESSION ".*test-rmse:0.087.*")


@@ -2,42 +2,34 @@ Contributors of DMLC/XGBoost
============================
XGBoost has been developed and used by a group of active community. Everyone is more than welcomed to is a great way to make the project better and more accessible to more users.
Project Management Committee(PMC)
----------
The Project Management Committee(PMC) consists group of active committers that moderate the discussion, manage the project release, and proposes new committer/PMC members.
* [Tianqi Chen](https://github.com/tqchen), University of Washington
- Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
* [Michael Benesty](https://github.com/pommedeterresautee)
- Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
- Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
* [Nan Zhu](https://github.com/CodingCat), Uber
- Nan is a software engineer in Uber. He contributed mostly in JVM packages.
* [Jiaming Yuan](https://github.com/trivialfis)
- Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
* [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
- Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
* [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
- Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
* [Hongliang Liu](https://github.com/phunterlau)
Committers
----------
Committers are people who have made substantial contribution to the project and granted write access to the project.
* [Tianqi Chen](https://github.com/tqchen), University of Washington
- Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
* [Tong He](https://github.com/hetong007), Amazon AI
- Tong is an applied scientist in Amazon AI. He is the maintainer of XGBoost R package.
* [Vadim Khotilovich](https://github.com/khotilov)
- Vadim contributes many improvements in R and core packages.
* [Bing Xu](https://github.com/antinucleon)
- Bing is the original creator of XGBoost Python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
* [Michael Benesty](https://github.com/pommedeterresautee)
- Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
- Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
* [Nan Zhu](https://github.com/CodingCat), Uber
- Nan is a software engineer in Uber. He contributed mostly in JVM packages.
* [Sergei Lebedev](https://github.com/superbobry), Criteo
- Sergei is a software engineer in Criteo. He contributed mostly in JVM packages.
* [Hongliang Liu](https://github.com/phunterlau)
* [Scott Lundberg](http://scottlundberg.com/), University of Washington
- Scott is a Ph.D. student at University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package.
* [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
- Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
* [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
- Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
* [Jiaming](https://github.com/trivialfis)
- Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
Become a Committer
------------------
@@ -97,8 +89,3 @@ List of Contributors
* [Sam Wilkinson](https://samwilkinson.io)
* [Matthew Jones](https://github.com/mt-jones)
* [Jiaxiang Li](https://github.com/JiaxiangBU)
* [Bryan Woods](https://github.com/bryan-woods)
- Bryan added support for cross-validation for the ranking objective
* [Haoda Fu](https://github.com/fuhaoda)
* [Evan Kepner](https://github.com/EvanKepner)
- Evan Kepner added support for os.PathLike file paths in Python

Jenkinsfile (78 changed lines)

@@ -6,25 +6,19 @@
// Command to run command inside a docker container
dockerRun = 'tests/ci_build/ci_build.sh'
import groovy.transform.Field
@Field
def commit_id // necessary to pass a variable from one stage to another
pipeline {
// Each stage specify its own agent
agent none
environment {
DOCKER_CACHE_ECR_ID = '492475357299'
DOCKER_CACHE_ECR_REGION = 'us-west-2'
DOCKER_CACHE_REPO = '492475357299.dkr.ecr.us-west-2.amazonaws.com'
}
// Setup common job properties
options {
ansiColor('xterm')
timestamps()
timeout(time: 240, unit: 'MINUTES')
timeout(time: 120, unit: 'MINUTES')
buildDiscarder(logRotator(numToKeepStr: '10'))
preserveStashes()
}
@@ -36,7 +30,6 @@ pipeline {
steps {
script {
checkoutSrcs()
commit_id = "${GIT_COMMIT}"
}
stash name: 'srcs'
milestone ordinal: 1
@@ -62,7 +55,7 @@ pipeline {
script {
parallel ([
'build-cpu': { BuildCPU() },
'build-cpu-rabit-mock': { BuildCPUMock() },
'build-gpu-cuda8.0': { BuildCUDA(cuda_version: '8.0') },
'build-gpu-cuda9.0': { BuildCUDA(cuda_version: '9.0') },
'build-gpu-cuda10.0': { BuildCUDA(cuda_version: '10.0') },
'build-gpu-cuda10.1': { BuildCUDA(cuda_version: '10.1') },
@@ -79,6 +72,7 @@ pipeline {
script {
parallel ([
'test-python-cpu': { TestPythonCPU() },
'test-python-gpu-cuda8.0': { TestPythonGPU(cuda_version: '8.0') },
'test-python-gpu-cuda9.0': { TestPythonGPU(cuda_version: '9.0') },
'test-python-gpu-cuda10.0': { TestPythonGPU(cuda_version: '10.0') },
'test-python-gpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1') },
@@ -121,7 +115,7 @@ def ClangTidy() {
def docker_binary = "docker"
def dockerArgs = "--build-arg CUDA_VERSION=9.2"
sh """
${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} python3 tests/ci_build/tidy.py
${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} tests/ci_build/clang_tidy.sh
"""
deleteDir()
}
@@ -163,6 +157,7 @@ def Doxygen() {
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/doxygen.sh ${BRANCH_NAME}
"""
archiveArtifacts artifacts: "build/${BRANCH_NAME}.tar.bz2", allowEmptyArchive: true
echo 'Uploading doc...'
s3Upload file: "build/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "doxygen/${BRANCH_NAME}.tar.bz2"
deleteDir()
@@ -180,10 +175,10 @@ def BuildCPU() {
${dockerRun} ${container_type} ${docker_binary} build/testxgboost
"""
// Sanitizer test
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer -e ASAN_OPTIONS=symbolize=1 -e UBSAN_OPTIONS=print_stacktrace=1:log_path=ubsan_error.log --cap-add SYS_PTRACE'"
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer -e ASAN_OPTIONS=symbolize=1 --cap-add SYS_PTRACE'"
def docker_args = "--build-arg CMAKE_VERSION=3.12"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address" \
-DCMAKE_BUILD_TYPE=Debug -DSANITIZER_PATH=/usr/lib/x86_64-linux-gnu/
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
"""
@@ -191,22 +186,6 @@ def BuildCPU() {
}
}
def BuildCPUMock() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Build CPU with rabit mock"
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_mock_cmake.sh
"""
echo 'Stashing rabit C++ test executable (xgboost)...'
stash name: 'xgboost_rabit_tests', includes: 'xgboost'
deleteDir()
}
}
def BuildCUDA(args) {
node('linux && cpu') {
unstash name: 'srcs'
@@ -217,14 +196,15 @@ def BuildCUDA(args) {
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON
${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux1_x86_64
"""
// Stash wheel for CUDA 9.0 target
if (args.cuda_version == '9.0') {
// Stash wheel for CUDA 8.0 / 9.0 target
if (args.cuda_version == '8.0') {
echo 'Stashing Python wheel...'
stash name: 'xgboost_whl_cuda8', includes: 'python-package/dist/*.whl'
} else if (args.cuda_version == '9.0') {
echo 'Stashing Python wheel...'
stash name: 'xgboost_whl_cuda9', includes: 'python-package/dist/*.whl'
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
archiveArtifacts artifacts: "python-package/dist/*.whl", allowEmptyArchive: true
echo 'Stashing C++ test executable (testxgboost)...'
stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost'
}
@@ -258,6 +238,7 @@ def BuildJVMDoc() {
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_doc.sh ${BRANCH_NAME}
"""
archiveArtifacts artifacts: "jvm-packages/${BRANCH_NAME}.tar.bz2", allowEmptyArchive: true
echo 'Uploading doc...'
s3Upload file: "jvm-packages/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${BRANCH_NAME}.tar.bz2"
deleteDir()
@@ -281,7 +262,11 @@ def TestPythonCPU() {
def TestPythonGPU(args) {
nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
node(nodeReq) {
unstash name: 'xgboost_whl_cuda9'
if (args.cuda_version == '8.0') {
unstash name: 'xgboost_whl_cuda8'
} else {
unstash name: 'xgboost_whl_cuda9'
}
unstash name: 'srcs'
echo "Test Python GPU: CUDA ${args.cuda_version}"
def container_type = "gpu"
@@ -298,27 +283,6 @@ def TestPythonGPU(args) {
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh gpu
"""
}
// For CUDA 10.0 target, run cuDF tests too
if (args.cuda_version == '10.0') {
echo "Running tests with cuDF..."
sh """
${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh cudf
"""
}
deleteDir()
}
}
def TestCppRabit() {
node(nodeReq) {
unstash name: 'xgboost_rabit_tests'
unstash name: 'srcs'
echo "Test C++, rabit mock on"
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/runxgb.sh xgboost tests/ci_build/approx.conf.in
"""
deleteDir()
}
}
@@ -374,7 +338,7 @@ def TestR(args) {
def use_r35_flag = (args.use_r35) ? "1" : "0"
def docker_args = "--build-arg USE_R35=${use_r35_flag}"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_test_rpkg.sh || tests/ci_build/print_r_stacktrace.sh
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_test_rpkg.sh
"""
deleteDir()
}


@@ -3,11 +3,6 @@
/* Jenkins pipeline for Windows AMD64 target */
import groovy.transform.Field
@Field
def commit_id // necessary to pass a variable from one stage to another
pipeline {
agent none
// Build stages
@@ -17,7 +12,6 @@ pipeline {
steps {
script {
checkoutSrcs()
commit_id = "${GIT_COMMIT}"
}
stash name: 'srcs'
milestone ordinal: 1
@@ -82,7 +76,7 @@ def BuildWin64() {
"""
bat """
cd python-package
conda activate && python setup.py bdist_wheel --universal && for /R %%i in (dist\\*.whl) DO python ../tests/ci_build/rename_whl.py "%%i" ${commit_id} win_amd64
conda activate && python setup.py bdist_wheel --universal
"""
echo "Insert vcomp140.dll (OpenMP runtime) into the wheel..."
bat """
@@ -92,8 +86,7 @@ def BuildWin64() {
"""
echo 'Stashing Python wheel...'
stash name: 'xgboost_whl', includes: 'python-package/dist/*.whl'
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
archiveArtifacts artifacts: "python-package/dist/*.whl", allowEmptyArchive: true
echo 'Stashing C++ test executable (testxgboost)...'
stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost.exe'
deleteDir()


@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright (c) 2019 by Contributors
Copyright (c) 2018 by Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -42,6 +42,11 @@ ifeq ($(USE_OPENMP), 0)
endif
include $(DMLC_CORE)/make/dmlc.mk
# include the plugins
ifdef XGB_PLUGINS
include $(XGB_PLUGINS)
endif
# set compiler defaults for OSX versus *nix
# let people override either
OS := $(shell uname)
@@ -62,8 +67,8 @@ export CXX = g++
endif
endif
export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS)
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS) $(PLUGIN_LDFLAGS)
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS)
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include
#java include path
export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java
@@ -125,7 +130,7 @@ $(RABIT)/lib/$(LIB_RABIT): $(wildcard $(RABIT)/src/*.cc)
jvm: jvm-packages/lib/libxgboost4j.so
SRC = $(wildcard src/*.cc src/*/*.cc)
ALL_OBJ = $(patsubst src/%.cc, build/%.o, $(SRC))
ALL_OBJ = $(patsubst src/%.cc, build/%.o, $(SRC)) $(PLUGIN_OBJS)
AMALGA_OBJ = amalgamation/xgboost-all0.o
LIB_DEP = $(DMLC_CORE)/libdmlc.a $(RABIT)/lib/$(LIB_RABIT)
ALL_DEP = $(filter-out build/cli_main.o, $(ALL_OBJ)) $(LIB_DEP)
@@ -137,6 +142,11 @@ build/%.o: src/%.cc
$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
$(CXX) -c $(CFLAGS) $< -o $@
build_plugin/%.o: plugin/%.cc
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -MM -MT build_plugin/$*.o $< >build_plugin/$*.d
$(CXX) -c $(CFLAGS) $< -o $@
# The should be equivalent to $(ALL_OBJ) except for build/cli_main.o
amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc
$(CXX) -c $(CFLAGS) $< -o $@
@@ -170,7 +180,7 @@ lint: rcpplint
python-package/xgboost/include python-package/xgboost/lib \
python-package/xgboost/make python-package/xgboost/rabit \
python-package/xgboost/src --pylint-rc ${PWD}/python-package/.pylintrc xgboost \
${LINT_LANG} include src python-package
${LINT_LANG} include src plugin python-package
pylint:
flake8 --ignore E501 python-package
@@ -190,7 +200,7 @@ cover: check
endif
clean:
$(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
$(RM) -rf build build_plugin lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
$(RM) -rf build_tests *.gcov tests/cpp/xgboost_test
if [ -d "R-package/src" ]; then \
cd R-package/src; \
@@ -221,9 +231,7 @@ pippack: clean_all
rm -rf python-package/xgboost/rabit
rm -rf python-package/xgboost/src
cp -r python-package xgboost-python
cp -r CMakeLists.txt xgboost-python/xgboost/
cp -r cmake xgboost-python/xgboost/
cp -r plugin xgboost-python/xgboost/
cp -r Makefile xgboost-python/xgboost/
cp -r make xgboost-python/xgboost/
cp -r src xgboost-python/xgboost/
cp -r tests xgboost-python/xgboost/
@@ -254,17 +262,10 @@ Rpack: clean_all
cp -r dmlc-core/include xgboost/src/dmlc-core/include
cp -r dmlc-core/src xgboost/src/dmlc-core/src
cp ./LICENSE xgboost
# Modify PKGROOT in Makevars.in
cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.in
# Configure Makevars.win (Windows-specific Makevars, likely using MinGW)
cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' | sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.in
cp xgboost/src/Makevars.in xgboost/src/Makevars.win
cat xgboost/src/Makevars.in| sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.win
sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CXXFLAGS\)/g' xgboost/src/Makevars.win
sed -i -e 's/-pthread/$$\(SHLIB_PTHREAD_FLAGS\)/g' xgboost/src/Makevars.win
sed -i -e 's/@ENDIAN_FLAG@/-DDMLC_CMAKE_LITTLE_ENDIAN=1/g' xgboost/src/Makevars.win
sed -i -e 's/@BACKTRACE_LIB@//g' xgboost/src/Makevars.win
sed -i -e 's/@OPENMP_LIB@//g' xgboost/src/Makevars.win
rm -f xgboost/src/Makevars.win-e # OSX sed create this extra file; remove it
bash R-package/remove_warning_suppression_pragma.sh
rm xgboost/remove_warning_suppression_pragma.sh
@@ -277,3 +278,4 @@ Rcheck: Rbuild
-include build/*.d
-include build/*/*.d
-include build_plugin/*/*.d


@@ -29,6 +29,6 @@ set_target_properties(
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
set(XGBOOST_DEFINITIONS "${XGBOOST_DEFINITIONS};${R_DEFINITIONS}" PARENT_SCOPE)
set(XGBOOST_DEFINITIONS ${R_DEFINITIONS} PARENT_SCOPE)
set(XGBOOST_OBJ_SOURCES $<TARGET_OBJECTS:xgboost-r> PARENT_SCOPE)
set(LINKED_LIBRARIES_PRIVATE ${LINKED_LIBRARIES_PRIVATE} ${LIBR_CORE_LIBRARY} PARENT_SCOPE)


@@ -1,8 +1,8 @@
Package: xgboost
Type: Package
Title: Extreme Gradient Boosting
Version: 1.0.0.1
Date: 2019-07-23
Version: 0.90.0.1
Date: 2019-05-18
Authors@R: c(
person("Tianqi", "Chen", role = c("aut"),
email = "tianqi.tchen@gmail.com"),
@@ -63,5 +63,5 @@ Imports:
data.table (>= 1.9.6),
magrittr (>= 1.5),
stringi (>= 0.5.2)
RoxygenNote: 7.0.2
RoxygenNote: 6.1.0
SystemRequirements: GNU make, C++11


@@ -145,7 +145,7 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
if (is.null(obj)) {
.Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
} else {
pred <- predict(booster_handle, dtrain, training = TRUE)
pred <- predict(booster_handle, dtrain)
gpair <- obj(pred, dtrain)
.Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
}
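
In the hunk above, when a custom objective `obj` is supplied, `xgb.iter.update` computes predictions itself and hands them to `obj(pred, dtrain)`, which must return a list of gradient and hessian vectors; the newer side of the diff passes `training = TRUE` so that DART boosters apply dropout to these training-time predictions. A minimal sketch of such an objective, assuming an existing regression xgb.DMatrix `dtrain` (illustrative only, not part of this diff):

    # Hedged sketch: a squared-error custom objective in the shape the code
    # path above consumes -- function(pred, dtrain) returning grad and hess.
    sqerr_obj <- function(pred, dtrain) {
      labels <- getinfo(dtrain, "label")
      list(grad = pred - labels,           # d/dpred of 0.5 * (pred - label)^2
           hess = rep(1, length(pred)))    # second derivative is constant 1
    }
    bst <- xgb.train(params = list(max_depth = 2), data = dtrain,
                     nrounds = 10, obj = sqerr_obj)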


@@ -51,13 +51,11 @@ is.null.handle <- function(handle) {
# Return a verified to be valid handle out of either xgb.Booster.handle or xgb.Booster
# internal utility function
xgb.get.handle <- function(object) {
if (inherits(object, "xgb.Booster")) {
handle <- object$handle
} else if (inherits(object, "xgb.Booster.handle")) {
handle <- object
} else {
handle <- switch(class(object)[1],
xgb.Booster = object$handle,
xgb.Booster.handle = object,
stop("argument must be of either xgb.Booster or xgb.Booster.handle class")
}
)
if (is.null.handle(handle)) {
stop("invalid xgb.Booster.handle")
}
@@ -139,8 +137,6 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' @param reshape whether to reshape the vector of predictions to a matrix form when there are several
#' prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
#' or predinteraction flags is TRUE.
#' @param training whether is the prediction result used for training. For dart booster,
#' training predicting will perform dropout.
#' @param ... Parameters passed to \code{predict.xgb.Booster}
#'
#' @details
@@ -290,7 +286,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' @export
predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FALSE, ntreelimit = NULL,
predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
reshape = FALSE, training = FALSE, ...) {
reshape = FALSE, ...) {
object <- xgb.Booster.complete(object, saveraw = FALSE)
if (!inherits(newdata, "xgb.DMatrix"))
@@ -309,8 +305,7 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + 4L * as.logical(predcontrib) +
8L * as.logical(approxcontrib) + 16L * as.logical(predinteraction)
ret <- .Call(XGBoosterPredict_R, object$handle, newdata, option[1],
as.integer(ntreelimit), as.integer(training))
ret <- .Call(XGBoosterPredict_R, object$handle, newdata, option[1], as.integer(ntreelimit))
n_ret <- length(ret)
n_row <- nrow(newdata)
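
The `option` computed above packs the independent prediction flags into a single integer bitmask before the `.Call` into the C API (outputmargin = 1, predleaf = 2, predcontrib = 4, approxcontrib = 8, predinteraction = 16). A worked example of the arithmetic only:

    # Requesting leaf indices plus (exact) feature contributions:
    predleaf <- TRUE; predcontrib <- TRUE
    option <- 0L + 1L * FALSE + 2L * as.logical(predleaf) +
      4L * as.logical(predcontrib) + 8L * FALSE + 16L * FALSE
    option  # 6L: the 2 (predleaf) and 4 (predcontrib) bits are set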


@@ -47,8 +47,6 @@
#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds
#' (each element must be a vector of test fold's indices). When folds are supplied,
#' the \code{nfold} and \code{stratified} parameters are ignored.
#' @param train_folds \code{list} list specifying which indicies to use for training. If \code{NULL}
#' (the default) all indices not specified in \code{folds} will be used for training.
#' @param verbose \code{boolean}, print the statistics during the process
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
#' Default is 1 which means all messages are printed. This parameter is passed to the
@@ -116,7 +114,7 @@
#' @export
xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA,
prediction = FALSE, showsd = TRUE, metrics=list(),
obj = NULL, feval = NULL, stratified = TRUE, folds = NULL, train_folds = NULL,
obj = NULL, feval = NULL, stratified = TRUE, folds = NULL,
verbose = TRUE, print_every_n=1L,
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) {
@@ -135,15 +133,8 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
# Check the labels
if ( (inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
(!inherits(data, 'xgb.DMatrix') && is.null(label))) {
(!inherits(data, 'xgb.DMatrix') && is.null(label)))
stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix")
} else if (inherits(data, 'xgb.DMatrix')) {
if (!is.null(label))
warning("xgb.cv: label will be ignored, since data is of type xgb.DMatrix")
cv_label = getinfo(data, 'label')
} else {
cv_label = label
}
# CV folds
if(!is.null(folds)) {
@@ -153,7 +144,7 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
} else {
if (nfold <= 1)
stop("'nfold' must be > 1")
folds <- generate.cv.folds(nfold, nrow(data), stratified, cv_label, params)
folds <- generate.cv.folds(nfold, nrow(data), stratified, label, params)
}
# Potential TODO: sequential CV
@@ -188,15 +179,10 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
# create the booster-folds
# train_folds
dall <- xgb.get.DMatrix(data, label, missing)
bst_folds <- lapply(seq_along(folds), function(k) {
dtest <- slice(dall, folds[[k]])
# code originally contributed by @RolandASc on stackoverflow
if(is.null(train_folds))
dtrain <- slice(dall, unlist(folds[-k]))
else
dtrain <- slice(dall, train_folds[[k]])
dtrain <- slice(dall, unlist(folds[-k]))
handle <- xgb.Booster.handle(params, list(dtrain, dtest))
list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test=dtest), index = folds[[k]])
})
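
The newer side of this hunk (absent in 0.90) lets a `train_folds` list override the default rule that each fold trains on the complement of its test indices. A hedged usage sketch, assuming `dtrain` is an existing xgb.DMatrix with labels and 100 rows, e.g. for time-ordered validation where training rows must precede the scored rows:

    test_folds  <- list(61:80, 81:100)   # indices scored in each fold
    train_folds <- list(1:60, 1:80)      # custom training indices per fold
    # nfold/stratified are ignored when explicit folds are supplied
    cv <- xgb.cv(params = list(objective = "reg:squarederror"),
                 data = dtrain, nrounds = 20,
                 folds = test_folds, train_folds = train_folds)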


@@ -293,9 +293,6 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
}
# Sort the callbacks into categories
cb <- categorize.callbacks(callbacks)
if (!is.null(params[['seed']])) {
warning("xgb.train: `seed` is ignored in R package. Use `set.seed()` instead.")
}
# The tree updating process would need slightly different handling
is_update <- NVL(params[['process_type']], '.') == 'update'
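
The warning on the newer side of this hunk exists because a `seed` entry in `params` is ignored by the R package; reproducibility comes from R's own RNG instead. A minimal sketch, assuming an existing `dtrain`:

    # Seed through R, not through params$seed:
    set.seed(42)
    bst <- xgb.train(params = list(subsample = 0.5, colsample_bytree = 0.5),
                     data = dtrain, nrounds = 10)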


@@ -5,8 +5,8 @@
#' @export
xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
params = list(), nrounds,
verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL,
verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL,
save_period = NULL, save_name = "xgboost.model",
xgb_model = NULL, callbacks = list(), ...) {
@@ -18,16 +18,16 @@ xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
early_stopping_rounds = early_stopping_rounds, maximize = maximize,
save_period = save_period, save_name = save_name,
xgb_model = xgb_model, callbacks = callbacks, ...)
return (bst)
return(bst)
}
#' Training part from Mushroom Data Set
#'
#'
#' This data set is originally from the Mushroom data set,
#' UCI Machine Learning Repository.
#'
#'
#' This data set includes the following fields:
#'
#'
#' \itemize{
#' \item \code{label} the label for each record
#' \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
@@ -35,16 +35,16 @@ xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
#'
#' @references
#' https://archive.ics.uci.edu/ml/datasets/Mushroom
#'
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
#'
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
#' School of Information and Computer Science.
#'
#'
#' @docType data
#' @keywords datasets
#' @name agaricus.train
#' @usage data(agaricus.train)
#' @format A list containing a label vector, and a dgCMatrix object with 6513
#' @format A list containing a label vector, and a dgCMatrix object with 6513
#' rows and 127 variables
NULL
@@ -52,9 +52,9 @@ NULL
#'
#' This data set is originally from the Mushroom data set,
#' UCI Machine Learning Repository.
#'
#'
#' This data set includes the following fields:
#'
#'
#' \itemize{
#' \item \code{label} the label for each record
#' \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
@@ -62,16 +62,16 @@ NULL
#'
#' @references
#' https://archive.ics.uci.edu/ml/datasets/Mushroom
#'
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
#'
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
#' School of Information and Computer Science.
#'
#'
#' @docType data
#' @keywords datasets
#' @name agaricus.test
#' @usage data(agaricus.test)
#' @format A list containing a label vector, and a dgCMatrix object with 1611
#' @format A list containing a label vector, and a dgCMatrix object with 1611
#' rows and 126 variables
NULL
@@ -107,7 +107,7 @@ NULL
#' @importFrom graphics par
#' @importFrom graphics title
#' @importFrom grDevices rgb
#'
#'
#' @import methods
#' @useDynLib xgboost, .registration = TRUE
NULL

R-package/configure (1043 changed lines; diff suppressed because it is too large)


@@ -4,21 +4,6 @@ AC_PREREQ(2.62)
AC_INIT([xgboost],[0.6-3],[],[xgboost],[])
# Use this line to set CC variable to a C compiler
AC_PROG_CC
### Check whether backtrace() is part of libc or the external lib libexecinfo
AC_MSG_CHECKING([Backtrace lib])
AC_MSG_RESULT([])
AC_CHECK_LIB([execinfo], [backtrace], [BACKTRACE_LIB=-lexecinfo], [BACKTRACE_LIB=''])
### Endian detection
AC_MSG_CHECKING([endian])
AC_MSG_RESULT([])
AC_RUN_IFELSE([AC_LANG_PROGRAM([[#include <stdint.h>]], [[const uint16_t endianness = 256; return !!(*(const uint8_t *)&endianness);]])],
[ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=1"],
[ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=0"])
OPENMP_CXXFLAGS=""
if test `uname -s` = "Linux"
@@ -28,28 +13,19 @@ fi
if test `uname -s` = "Darwin"
then
OPENMP_CXXFLAGS='-Xclang -fopenmp'
OPENMP_LIB='/usr/local/lib/libomp.dylib'
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
ac_pkg_openmp=no
AC_MSG_CHECKING([whether OpenMP will work in a package])
AC_LANG_CONFTEST([AC_LANG_PROGRAM([[#include <omp.h>]], [[ return (omp_get_max_threads() <= 1); ]])])
${CC} -o conftest conftest.c /usr/local/lib/libomp.dylib -Xclang -fopenmp 2>/dev/null && ./conftest && ac_pkg_openmp=yes
AC_LANG_CONFTEST(
[AC_LANG_PROGRAM([[#include <omp.h>]], [[ return omp_get_num_threads (); ]])])
PKG_CFLAGS="${OPENMP_CFLAGS}" PKG_LIBS="${OPENMP_CFLAGS}" "$RBIN" CMD SHLIB conftest.c 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD && "$RBIN" --vanilla -q -e "dyn.load(paste('conftest',.Platform\$dynlib.ext,sep=''))" 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD && ac_pkg_openmp=yes
AC_MSG_RESULT([${ac_pkg_openmp}])
if test "${ac_pkg_openmp}" = no; then
OPENMP_CXXFLAGS=''
OPENMP_LIB=''
echo '*****************************************************************************************'
echo 'WARNING: OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
echo ' To use all CPU cores for training jobs, you should install OpenMP by running\n'
echo ' brew install libomp'
echo '*****************************************************************************************'
fi
fi
AC_SUBST(OPENMP_CXXFLAGS)
AC_SUBST(OPENMP_LIB)
AC_SUBST(ENDIAN_FLAG)
AC_SUBST(BACKTRACE_LIB)
AC_CONFIG_FILES([src/Makevars])
AC_OUTPUT


@@ -30,7 +30,7 @@ wl <- list(train = dtrain, test = dtest)
# - similar to the 'hist'
# - the fastest option for moderately large datasets
# - current limitations: max_depth < 16, does not implement guided loss
# You can use tree_method = 'gpu_hist' for another GPU accelerated algorithm,
# You can use tree_method = 'gpu_exact' for another GPU accelerated algorithm,
# which is slower, more memory-hungry, but does not use binning.
param <- list(objective = 'reg:logistic', eval_metric = 'auc', subsample = 0.5, nthread = 4,
max_bin = 64, tree_method = 'gpu_hist')


@@ -4,7 +4,7 @@
\name{agaricus.test}
\alias{agaricus.test}
\title{Test part from Mushroom Data Set}
\format{A list containing a label vector, and a dgCMatrix object with 1611
\format{A list containing a label vector, and a dgCMatrix object with 1611
rows and 126 variables}
\usage{
data(agaricus.test)
@@ -24,8 +24,8 @@ This data set includes the following fields:
\references{
https://archive.ics.uci.edu/ml/datasets/Mushroom
Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
School of Information and Computer Science.
}
\keyword{datasets}


@@ -4,7 +4,7 @@
\name{agaricus.train}
\alias{agaricus.train}
\title{Training part from Mushroom Data Set}
\format{A list containing a label vector, and a dgCMatrix object with 6513
\format{A list containing a label vector, and a dgCMatrix object with 6513
rows and 127 variables}
\usage{
data(agaricus.train)
@@ -24,8 +24,8 @@ This data set includes the following fields:
\references{
https://archive.ics.uci.edu/ml/datasets/Mushroom
Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
School of Information and Computer Science.
}
\keyword{datasets}


@@ -4,12 +4,8 @@
\alias{cb.early.stop}
\title{Callback closure to activate the early stopping.}
\usage{
cb.early.stop(
stopping_rounds,
maximize = FALSE,
metric_name = NULL,
verbose = TRUE
)
cb.early.stop(stopping_rounds, maximize = FALSE, metric_name = NULL,
verbose = TRUE)
}
\arguments{
\item{stopping_rounds}{The number of rounds with no improvement in


@@ -5,20 +5,10 @@
\alias{predict.xgb.Booster.handle}
\title{Predict method for eXtreme Gradient Boosting model}
\usage{
\method{predict}{xgb.Booster}(
object,
newdata,
missing = NA,
outputmargin = FALSE,
ntreelimit = NULL,
predleaf = FALSE,
predcontrib = FALSE,
approxcontrib = FALSE,
predinteraction = FALSE,
reshape = FALSE,
training = FALSE,
...
)
\method{predict}{xgb.Booster}(object, newdata, missing = NA,
outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE,
predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
reshape = FALSE, ...)
\method{predict}{xgb.Booster.handle}(object, ...)
}
@@ -49,9 +39,6 @@ It will use all the trees by default (\code{NULL} value).}
prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
or predinteraction flags is TRUE.}
\item{training}{whether is the prediction result used for training. For dart booster,
training predicting will perform dropout.}
\item{...}{Parameters passed to \code{predict.xgb.Booster}}
}
\value{


@@ -87,6 +87,6 @@ accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) /
# Here the accuracy was already good and is now perfect.
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
accuracy.after, "!\n"))
accuracy.after, "!\\n"))
}


@@ -4,28 +4,11 @@
\alias{xgb.cv}
\title{Cross Validation}
\usage{
xgb.cv(
params = list(),
data,
nrounds,
nfold,
label = NULL,
missing = NA,
prediction = FALSE,
showsd = TRUE,
metrics = list(),
obj = NULL,
feval = NULL,
stratified = TRUE,
folds = NULL,
train_folds = NULL,
verbose = TRUE,
print_every_n = 1L,
early_stopping_rounds = NULL,
maximize = NULL,
callbacks = list(),
...
)
xgb.cv(params = list(), data, nrounds, nfold, label = NULL, missing = NA,
prediction = FALSE, showsd = TRUE, metrics = list(), obj = NULL,
feval = NULL, stratified = TRUE, folds = NULL, verbose = TRUE,
print_every_n = 1L, early_stopping_rounds = NULL, maximize = NULL,
callbacks = list(), ...)
}
\arguments{
\item{params}{the list of parameters. Commonly used ones are:
@@ -86,9 +69,6 @@ by the values of outcome labels.}
(each element must be a vector of test fold's indices). When folds are supplied,
the \code{nfold} and \code{stratified} parameters are ignored.}
\item{train_folds}{\code{list} list specifying which indicies to use for training. If \code{NULL}
(the default) all indices not specified in \code{folds} will be used for training.}
\item{verbose}{\code{boolean}, print the statistics during the process}
\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}.


@@ -4,14 +4,8 @@
\alias{xgb.dump}
\title{Dump an xgboost model in text format.}
\usage{
xgb.dump(
model,
fname = NULL,
fmap = "",
with_stats = FALSE,
dump_format = c("text", "json"),
...
)
xgb.dump(model, fname = NULL, fmap = "", with_stats = FALSE,
dump_format = c("text", "json"), ...)
}
\arguments{
\item{model}{the model object.}


@@ -4,14 +4,8 @@
\alias{xgb.importance}
\title{Importance of features in a model.}
\usage{
xgb.importance(
feature_names = NULL,
model = NULL,
trees = NULL,
data = NULL,
label = NULL,
target = NULL
)
xgb.importance(feature_names = NULL, model = NULL, trees = NULL,
data = NULL, label = NULL, target = NULL)
}
\arguments{
\item{feature_names}{character vector of feature names. If the model already


@@ -4,14 +4,8 @@
\alias{xgb.model.dt.tree}
\title{Parse a boosted tree model text dump}
\usage{
xgb.model.dt.tree(
feature_names = NULL,
model = NULL,
text = NULL,
trees = NULL,
use_int_id = FALSE,
...
)
xgb.model.dt.tree(feature_names = NULL, model = NULL, text = NULL,
trees = NULL, use_int_id = FALSE, ...)
}
\arguments{
\item{feature_names}{character vector of feature names. If the model already


@@ -5,17 +5,11 @@
\alias{xgb.plot.deepness}
\title{Plot model trees deepness}
\usage{
xgb.ggplot.deepness(
model = NULL,
which = c("2x1", "max.depth", "med.depth", "med.weight")
)
xgb.ggplot.deepness(model = NULL, which = c("2x1", "max.depth", "med.depth",
"med.weight"))
xgb.plot.deepness(
model = NULL,
which = c("2x1", "max.depth", "med.depth", "med.weight"),
plot = TRUE,
...
)
xgb.plot.deepness(model = NULL, which = c("2x1", "max.depth", "med.depth",
"med.weight"), plot = TRUE, ...)
}
\arguments{
\item{model}{either an \code{xgb.Booster} model generated by the \code{xgb.train} function


@@ -5,25 +5,12 @@
\alias{xgb.plot.importance}
\title{Plot feature importance as a bar graph}
\usage{
xgb.ggplot.importance(
importance_matrix = NULL,
top_n = NULL,
measure = NULL,
rel_to_first = FALSE,
n_clusters = c(1:10),
...
)
xgb.ggplot.importance(importance_matrix = NULL, top_n = NULL,
measure = NULL, rel_to_first = FALSE, n_clusters = c(1:10), ...)
xgb.plot.importance(
importance_matrix = NULL,
top_n = NULL,
measure = NULL,
rel_to_first = FALSE,
left_margin = 10,
cex = NULL,
plot = TRUE,
...
)
xgb.plot.importance(importance_matrix = NULL, top_n = NULL,
measure = NULL, rel_to_first = FALSE, left_margin = 10, cex = NULL,
plot = TRUE, ...)
}
\arguments{
\item{importance_matrix}{a \code{data.table} returned by \code{\link{xgb.importance}}.}


@@ -4,15 +4,8 @@
\alias{xgb.plot.multi.trees}
\title{Project all trees on one tree and plot it}
\usage{
xgb.plot.multi.trees(
model,
feature_names = NULL,
features_keep = 5,
plot_width = NULL,
plot_height = NULL,
render = TRUE,
...
)
xgb.plot.multi.trees(model, feature_names = NULL, features_keep = 5,
plot_width = NULL, plot_height = NULL, render = TRUE, ...)
}
\arguments{
\item{model}{produced by the \code{xgb.train} function.}


@@ -4,33 +4,13 @@
\alias{xgb.plot.shap}
\title{SHAP contribution dependency plots}
\usage{
xgb.plot.shap(
data,
shap_contrib = NULL,
features = NULL,
top_n = 1,
model = NULL,
trees = NULL,
target_class = NULL,
approxcontrib = FALSE,
subsample = NULL,
n_col = 1,
col = rgb(0, 0, 1, 0.2),
pch = ".",
discrete_n_uniq = 5,
discrete_jitter = 0.01,
ylab = "SHAP",
plot_NA = TRUE,
col_NA = rgb(0.7, 0, 1, 0.6),
pch_NA = ".",
pos_NA = 1.07,
plot_loess = TRUE,
col_loess = 2,
span_loess = 0.5,
which = c("1d", "2d"),
plot = TRUE,
...
)
xgb.plot.shap(data, shap_contrib = NULL, features = NULL, top_n = 1,
model = NULL, trees = NULL, target_class = NULL,
approxcontrib = FALSE, subsample = NULL, n_col = 1, col = rgb(0, 0, 1,
0.2), pch = ".", discrete_n_uniq = 5, discrete_jitter = 0.01,
ylab = "SHAP", plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6),
pch_NA = ".", pos_NA = 1.07, plot_loess = TRUE, col_loess = 2,
span_loess = 0.5, which = c("1d", "2d"), plot = TRUE, ...)
}
\arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.}


@@ -4,16 +4,9 @@
\alias{xgb.plot.tree}
\title{Plot a boosted tree model}
\usage{
xgb.plot.tree(
feature_names = NULL,
model = NULL,
trees = NULL,
plot_width = NULL,
plot_height = NULL,
render = TRUE,
show_node_id = FALSE,
...
)
xgb.plot.tree(feature_names = NULL, model = NULL, trees = NULL,
plot_width = NULL, plot_height = NULL, render = TRUE,
show_node_id = FALSE, ...)
}
\arguments{
\item{feature_names}{names of each feature as a \code{character} vector.}


@@ -5,41 +5,15 @@
\alias{xgboost}
\title{eXtreme Gradient Boosting Training}
\usage{
xgb.train(
params = list(),
data,
nrounds,
watchlist = list(),
obj = NULL,
feval = NULL,
verbose = 1,
print_every_n = 1L,
early_stopping_rounds = NULL,
maximize = NULL,
save_period = NULL,
save_name = "xgboost.model",
xgb_model = NULL,
callbacks = list(),
...
)
xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL,
feval = NULL, verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(), ...)
xgboost(
data = NULL,
label = NULL,
missing = NA,
weight = NULL,
params = list(),
nrounds,
verbose = 1,
print_every_n = 1L,
early_stopping_rounds = NULL,
maximize = NULL,
save_period = NULL,
save_name = "xgboost.model",
xgb_model = NULL,
callbacks = list(),
...
)
xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
params = list(), nrounds, verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(), ...)
}
\arguments{
\item{params}{the list of parameters.


@@ -17,8 +17,8 @@ endif
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ @ENDIAN_FLAG@ -pthread
PKG_LIBS = @OPENMP_CXXFLAGS@ @OPENMP_LIB@ @ENDIAN_FLAG@ @BACKTRACE_LIB@ -pthread
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ -pthread
PKG_LIBS = @OPENMP_CXXFLAGS@ -pthread
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o


@@ -24,7 +24,7 @@ extern SEXP XGBoosterGetAttr_R(SEXP, SEXP);
extern SEXP XGBoosterLoadModelFromRaw_R(SEXP, SEXP);
extern SEXP XGBoosterLoadModel_R(SEXP, SEXP);
extern SEXP XGBoosterModelToRaw_R(SEXP);
extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterSaveModel_R(SEXP, SEXP);
extern SEXP XGBoosterSetAttr_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterSetParam_R(SEXP, SEXP, SEXP);
@@ -50,7 +50,7 @@ static const R_CallMethodDef CallEntries[] = {
{"XGBoosterLoadModelFromRaw_R", (DL_FUNC) &XGBoosterLoadModelFromRaw_R, 2},
{"XGBoosterLoadModel_R", (DL_FUNC) &XGBoosterLoadModel_R, 2},
{"XGBoosterModelToRaw_R", (DL_FUNC) &XGBoosterModelToRaw_R, 1},
{"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 5},
{"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 4},
{"XGBoosterSaveModel_R", (DL_FUNC) &XGBoosterSaveModel_R, 2},
{"XGBoosterSetAttr_R", (DL_FUNC) &XGBoosterSetAttr_R, 3},
{"XGBoosterSetParam_R", (DL_FUNC) &XGBoosterSetParam_R, 3},


@@ -136,10 +136,9 @@ SEXP XGDMatrixSliceDMatrix_R(SEXP handle, SEXP idxset) {
idxvec[i] = INTEGER(idxset)[i] - 1;
}
DMatrixHandle res;
CHECK_CALL(XGDMatrixSliceDMatrixEx(R_ExternalPtrAddr(handle),
BeginPtr(idxvec), len,
&res,
0));
CHECK_CALL(XGDMatrixSliceDMatrix(R_ExternalPtrAddr(handle),
BeginPtr(idxvec), len,
&res));
ret = PROTECT(R_MakeExternalPtr(res, R_NilValue, R_NilValue));
R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
R_API_END();
@@ -166,9 +165,7 @@ SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array) {
for (int i = 0; i < len; ++i) {
vec[i] = static_cast<unsigned>(INTEGER(array)[i]);
}
CHECK_CALL(XGDMatrixSetUIntInfo(R_ExternalPtrAddr(handle),
CHAR(asChar(field)),
BeginPtr(vec), len));
CHECK_CALL(XGDMatrixSetGroup(R_ExternalPtrAddr(handle), BeginPtr(vec), len));
} else {
std::vector<float> vec(len);
#pragma omp parallel for schedule(static)
@@ -176,8 +173,8 @@ SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array) {
vec[i] = REAL(array)[i];
}
CHECK_CALL(XGDMatrixSetFloatInfo(R_ExternalPtrAddr(handle),
CHAR(asChar(field)),
BeginPtr(vec), len));
CHAR(asChar(field)),
BeginPtr(vec), len));
}
R_API_END();
return R_NilValue;
@@ -295,26 +292,24 @@ SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames) {
vec_sptr.push_back(vec_names[i].c_str());
}
CHECK_CALL(XGBoosterEvalOneIter(R_ExternalPtrAddr(handle),
asInteger(iter),
BeginPtr(vec_dmats),
BeginPtr(vec_sptr),
len, &ret));
asInteger(iter),
BeginPtr(vec_dmats),
BeginPtr(vec_sptr),
len, &ret));
R_API_END();
return mkString(ret);
}
SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
SEXP ntree_limit, SEXP training) {
SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask, SEXP ntree_limit) {
SEXP ret;
R_API_BEGIN();
bst_ulong olen;
const float *res;
CHECK_CALL(XGBoosterPredict(R_ExternalPtrAddr(handle),
R_ExternalPtrAddr(dmat),
asInteger(option_mask),
asInteger(ntree_limit),
asInteger(training),
&olen, &res));
R_ExternalPtrAddr(dmat),
asInteger(option_mask),
asInteger(ntree_limit),
&olen, &res));
ret = PROTECT(allocVector(REALSXP, olen));
for (size_t i = 0; i < olen; ++i) {
REAL(ret)[i] = res[i];


@@ -148,10 +148,8 @@ XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evn
* \param dmat data matrix
* \param option_mask output_margin:1 predict_leaf:2
* \param ntree_limit limit number of trees used in prediction
* \param training Whether the prediction value is used for training.
*/
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
SEXP ntree_limit, SEXP training);
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask, SEXP ntree_limit);
/*!
* \brief load model from existing file
* \param handle handle
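The hunks above add a fifth `training` argument to `XGBoosterPredict_R`, telling the booster whether the prediction is requested during training — relevant for DART, where dropout is only applied to training-time predictions. As a rough illustration (not part of this diff), the 1.0-era Python API exposes the same switch on `Booster.predict`; the data below is made up:

```python
import numpy as np
import xgboost as xgb

# Hypothetical data, just to exercise the flag.
rng = np.random.RandomState(1994)
X, y = rng.randn(100, 3), rng.randn(100)
dtrain = xgb.DMatrix(X, label=y)
bst = xgb.train({'booster': 'dart', 'rate_drop': 0.5, 'one_drop': True,
                 'objective': 'reg:squarederror'}, dtrain, num_boost_round=8)

pred_infer = bst.predict(dtrain)                 # inference: no dropout, all trees used
pred_train = bst.predict(dtrain, training=True)  # training-time: trees may be dropped
```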


@@ -27,7 +27,7 @@ test_that("train and predict binary classification", {
pred <- predict(bst, test$data)
expect_length(pred, 1611)
pred1 <- predict(bst, train$data, ntreelimit = 1)
expect_length(pred1, 6513)
err_pred1 <- sum((pred1 > 0.5) != train$label)/length(train$label)
@@ -35,54 +35,6 @@ test_that("train and predict binary classification", {
expect_lt(abs(err_pred1 - err_log), 10e-6)
})
test_that("dart prediction works", {
nrounds = 32
set.seed(1994)
d <- cbind(
x1 = rnorm(100),
x2 = rnorm(100),
x3 = rnorm(100))
y <- d[,"x1"] + d[,"x2"]^2 +
ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
rnorm(100)
set.seed(1994)
booster_by_xgboost <- xgboost(data = d, label = y, max_depth = 2, booster = "dart",
rate_drop = 0.5, one_drop = TRUE,
eta = 1, nthread = 2, nrounds = nrounds, objective = "reg:squarederror")
pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0)
pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds)
expect_true(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE)))
pred_by_xgboost_2 <- predict(booster_by_xgboost, newdata = d, training = TRUE)
expect_false(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE)))
set.seed(1994)
dtrain <- xgb.DMatrix(data=d, info = list(label=y))
booster_by_train <- xgb.train( params = list(
booster = "dart",
max_depth = 2,
eta = 1,
rate_drop = 0.5,
one_drop = TRUE,
nthread = 1,
tree_method= "exact",
verbosity = 3,
objective = "reg:squarederror"
),
data = dtrain,
nrounds = nrounds
)
pred_by_train_0 <- predict(booster_by_train, newdata = dtrain, ntreelimit = 0)
pred_by_train_1 <- predict(booster_by_train, newdata = dtrain, ntreelimit = nrounds)
pred_by_train_2 <- predict(booster_by_train, newdata = dtrain, training = TRUE)
expect_true(all(matrix(pred_by_train_0, byrow=TRUE) == matrix(pred_by_xgboost_0, byrow=TRUE)))
expect_true(all(matrix(pred_by_train_1, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE)))
expect_true(all(matrix(pred_by_train_2, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE)))
})
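The DART test shown above pins down the prediction contract: at inference time no dropout is applied, so predicting with `ntreelimit = 0` (all trees) must match `ntreelimit = nrounds`, while training-time prediction may differ. A sketch of the same assertion in Python, on synthetic data:

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(1994)
X = rng.randn(100, 3)
y = X[:, 0] + X[:, 1] ** 2 + rng.randn(100)
dtrain = xgb.DMatrix(X, label=y)
bst = xgb.train({'booster': 'dart', 'max_depth': 2, 'eta': 1,
                 'rate_drop': 0.5, 'one_drop': True,
                 'objective': 'reg:squarederror'}, dtrain, num_boost_round=32)

# Inference applies no dropout, so "all trees" (ntree_limit=0) and
# "first nrounds trees" give identical results.
assert np.allclose(bst.predict(dtrain, ntree_limit=0),
                   bst.predict(dtrain, ntree_limit=32))
```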
test_that("train and predict softprob", {
lb <- as.numeric(iris$Species) - 1
set.seed(11)
@@ -122,7 +74,7 @@ test_that("train and predict softmax", {
expect_false(is.null(bst$evaluation_log))
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
expect_equal(bst$niter * 3, xgb.ntree(bst))
pred <- predict(bst, as.matrix(iris[, -5]))
expect_length(pred, nrow(iris))
err <- sum(pred != lb)/length(lb)
@@ -138,12 +90,12 @@ test_that("train and predict RF", {
num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1)
expect_equal(bst$niter, 1)
expect_equal(xgb.ntree(bst), 20)
pred <- predict(bst, train$data)
pred_err <- sum((pred > 0.5) != lb)/length(lb)
expect_lt(abs(bst$evaluation_log[1, train_error] - pred_err), 10e-6)
#expect_lt(pred_err, 0.03)
pred <- predict(bst, train$data, ntreelimit = 20)
pred_err_20 <- sum((pred > 0.5) != lb)/length(lb)
expect_equal(pred_err_20, pred_err)
@@ -239,27 +191,13 @@ test_that("xgb.cv works", {
expect_false(is.null(cv$call))
})
test_that("xgb.cv works with stratified folds", {
dtrain <- xgb.DMatrix(train$data, label = train$label)
set.seed(314159)
cv <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5,
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
verbose=TRUE, stratified = FALSE)
set.seed(314159)
cv2 <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5,
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
verbose=TRUE, stratified = TRUE)
# Stratified folds should result in different evaluation logs
expect_true(all(cv$evaluation_log[, test_error_mean] != cv2$evaluation_log[, test_error_mean]))
})
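The test above compares stratified and unstratified folds; stratification preserves the label ratio in each fold, so the two runs generally produce different evaluation logs. A rough Python equivalent, on made-up data:

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(314159)
X = rng.rand(500, 10)
y = (rng.rand(500) > 0.5).astype(int)
dtrain = xgb.DMatrix(X, label=y)
params = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}

cv_plain = xgb.cv(params, dtrain, num_boost_round=2, nfold=5,
                  stratified=False, seed=314159)
cv_strat = xgb.cv(params, dtrain, num_boost_round=2, nfold=5,
                  stratified=True, seed=314159)
# With pandas installed, xgb.cv returns a DataFrame keyed by metric name.
print(cv_plain['test-error-mean'])
print(cv_strat['test-error-mean'])
```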
test_that("train and predict with non-strict classes", {
# standard dense matrix input
train_dense <- as.matrix(train$data)
bst <- xgboost(data = train_dense, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0)
pr0 <- predict(bst, train_dense)
# dense matrix-like input of non-matrix class
class(train_dense) <- 'shmatrix'
expect_true(is.matrix(train_dense))
@@ -269,7 +207,7 @@ test_that("train and predict with non-strict classes", {
, regexp = NA)
expect_error(pr <- predict(bst, train_dense), regexp = NA)
expect_equal(pr0, pr)
# dense matrix-like input of non-matrix class with some inheritance
class(train_dense) <- c('pphmatrix','shmatrix')
expect_true(is.matrix(train_dense))
@@ -279,7 +217,7 @@ test_that("train and predict with non-strict classes", {
, regexp = NA)
expect_error(pr <- predict(bst, train_dense), regexp = NA)
expect_equal(pr0, pr)
# when someone inhertis from xgb.Booster, it should still be possible to use it as xgb.Booster
class(bst) <- c('super.Booster', 'xgb.Booster')
expect_error(pr <- predict(bst, train_dense), regexp = NA)


@@ -285,8 +285,7 @@ test_that("prediction in early-stopping xgb.cv works", {
set.seed(11)
expect_output(
cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.1, nrounds = 20,
early_stopping_rounds = 5, maximize = FALSE, stratified = FALSE,
prediction = TRUE)
early_stopping_rounds = 5, maximize = FALSE, prediction = TRUE)
, "Stopping. Best iteration")
expect_false(is.null(cv$best_iteration))


@@ -31,6 +31,7 @@ num_round <- 2
test_that("custom objective works", {
bst <- xgb.train(param, dtrain, num_round, watchlist)
expect_equal(class(bst), "xgb.Booster")
expect_equal(length(bst$raw), 1100)
expect_false(is.null(bst$evaluation_log))
expect_false(is.null(bst$evaluation_log$eval_error))
expect_lt(bst$evaluation_log[num_round, eval_error], 0.03)
@@ -57,4 +58,5 @@ test_that("custom objective using DMatrix attr works", {
param$objective = logregobjattr
bst <- xgb.train(param, dtrain, num_round, watchlist)
expect_equal(class(bst), "xgb.Booster")
expect_equal(length(bst$raw), 1100)
})


@@ -142,44 +142,6 @@ test_that("predict feature contributions works", {
}
})
test_that("SHAPs sum to predictions, with or without DART", {
d <- cbind(
x1 = rnorm(100),
x2 = rnorm(100),
x3 = rnorm(100))
y <- d[,"x1"] + d[,"x2"]^2 +
ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
rnorm(100)
nrounds <- 30
for (booster in list("gbtree", "dart")) {
fit <- xgboost(
params = c(
list(
booster = booster,
objective = "reg:squarederror",
eval_metric = "rmse"),
if (booster == "dart")
list(rate_drop = .01, one_drop = T)),
data = d,
label = y,
nrounds = nrounds)
pr <- function(...)
predict(fit, newdata = d, ...)
pred <- pr()
shap <- pr(predcontrib = T)
shapi <- pr(predinteraction = T)
tol = 1e-5
expect_equal(rowSums(shap), pred, tol = tol)
expect_equal(apply(shapi, 1, sum), pred, tol = tol)
for (i in 1 : nrow(d))
for (f in list(rowSums, colSums))
expect_equal(f(shapi[i,,]), shap[i,], tol = tol)
}
})
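The test above encodes a useful invariant: per-row SHAP contributions (and the full interaction matrix) sum to the margin prediction, for both `gbtree` and `dart`. A Python sketch of the same check, with synthetic data:

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
y = X[:, 0] + X[:, 1] ** 2 + rng.randn(100)
dtrain = xgb.DMatrix(X, label=y)
bst = xgb.train({'objective': 'reg:squarederror'}, dtrain, num_boost_round=30)

pred = bst.predict(dtrain)
shap = bst.predict(dtrain, pred_contribs=True)        # (n, n_feat + 1); last column is the bias
inter = bst.predict(dtrain, pred_interactions=True)   # (n, n_feat + 1, n_feat + 1)

# Contributions, and interaction contributions, sum to the prediction.
assert np.allclose(shap.sum(axis=1), pred, atol=1e-5)
assert np.allclose(inter.sum(axis=(1, 2)), pred, atol=1e-5)
```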
test_that("xgb-attribute functionality", {
val <- "my attribute value"
list.val <- list(my_attr=val, a=123, b='ok')


@@ -7,7 +7,6 @@
[![GitHub license](http://dmlc.github.io/img/apache2.svg)](./LICENSE)
[![CRAN Status Badge](http://www.r-pkg.org/badges/version/xgboost)](http://cran.r-project.org/web/packages/xgboost)
[![PyPI version](https://badge.fury.io/py/xgboost.svg)](https://pypi.python.org/pypi/xgboost/)
[![Optuna](https://img.shields.io/badge/Optuna-integrated-blue)](https://optuna.org)
[Community](https://xgboost.ai/community) |
[Documentation](https://xgboost.readthedocs.org) |
@@ -18,11 +17,11 @@
XGBoost is an optimized distributed gradient boosting library designed to be highly ***efficient***, ***flexible*** and ***portable***.
It implements machine learning algorithms under the [Gradient Boosting](https://en.wikipedia.org/wiki/Gradient_boosting) framework.
XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solves many data science problems in a fast and accurate way.
The same code runs on major distributed environment (Kubernetes, Hadoop, SGE, MPI, Dask) and can solve problems beyond billions of examples.
The same code runs on major distributed environment (Hadoop, SGE, MPI) and can solve problems beyond billions of examples.
License
-------
© Contributors, 2019. Licensed under an [Apache-2](https://github.com/dmlc/xgboost/blob/master/LICENSE) license.
© Contributors, 2016. Licensed under an [Apache-2](https://github.com/dmlc/xgboost/blob/master/LICENSE) license.
Contribute to XGBoost
---------------------
@@ -39,7 +38,7 @@ Sponsors
Become a sponsor and get a logo here. See details at [Sponsoring the XGBoost Project](https://xgboost.ai/sponsors). The funds are used to defray the cost of continuous integration and testing infrastructure (https://xgboost-ci.net).
## Open Source Collective sponsors
[![Backers on Open Collective](https://opencollective.com/xgboost/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/xgboost/sponsors/badge.svg)](#sponsors)
[![Backers on Open Collective](https://opencollective.com/xgboost/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/xgboost/sponsors/badge.svg)](#sponsors)
### Sponsors
[[Become a sponsor](https://opencollective.com/xgboost#sponsor)]


@@ -1,5 +1,5 @@
/*!
* Copyright 2015-2019 by Contributors.
* Copyright 2015 by Contributors.
* \brief XGBoost Amalgamation.
* This offers an alternative way to compile the entire library from this single file.
*
@@ -25,28 +25,25 @@
// gbms
#include "../src/gbm/gbm.cc"
#include "../src/gbm/gbtree.cc"
#include "../src/gbm/gbtree_model.cc"
#include "../src/gbm/gblinear.cc"
#include "../src/gbm/gblinear_model.cc"
// data
#include "../src/data/data.cc"
#include "../src/data/simple_csr_source.cc"
#include "../src/data/simple_dmatrix.cc"
#include "../src/data/sparse_page_raw_format.cc"
#include "../src/data/ellpack_page.cc"
#include "../src/data/ellpack_page_source.cc"
// prediction
#include "../src/predictor/predictor.cc"
#include "../src/predictor/cpu_predictor.cc"
#if DMLC_ENABLE_STD_THREAD
#include "../src/data/sparse_page_source.cc"
#include "../src/data/sparse_page_dmatrix.cc"
#include "../src/data/sparse_page_writer.cc"
#endif
// trees
#include "../src/tree/param.cc"
#include "../src/tree/split_evaluator.cc"
#include "../src/tree/tree_model.cc"
#include "../src/tree/tree_updater.cc"
@@ -57,7 +54,6 @@
#include "../src/tree/updater_sync.cc"
#include "../src/tree/updater_histmaker.cc"
#include "../src/tree/updater_skmaker.cc"
#include "../src/tree/constraints.cc"
// linear
#include "../src/linear/linear_updater.cc"
@@ -68,12 +64,8 @@
#include "../src/learner.cc"
#include "../src/logging.cc"
#include "../src/common/common.cc"
#include "../src/common/timer.cc"
#include "../src/common/host_device_vector.cc"
#include "../src/common/hist_util.cc"
#include "../src/common/json.cc"
#include "../src/common/io.cc"
#include "../src/common/version.cc"
// c_api
#include "../src/c_api/c_api.cc"


@@ -2,6 +2,10 @@ environment:
R_ARCH: x64
USE_RTOOLS: true
matrix:
- target: msvc
ver: 2013
generator: "Visual Studio 12 2013 Win64"
configuration: Release
- target: msvc
ver: 2015
generator: "Visual Studio 14 2015 Win64"
@@ -94,7 +98,7 @@ build_script:
cmake .. -G"%generator%" -DCMAKE_CONFIGURATION_TYPES="Release" -DR_LIB=ON &&
cmake --build . --target install --config Release
)
- if /i "%target%" == "jvm" cd jvm-packages && mvn test -pl :xgboost4j_2.12
- if /i "%target%" == "jvm" cd jvm-packages && mvn test -pl :xgboost4j
test_script:
- cd %APPVEYOR_BUILD_FOLDER%


@@ -6,7 +6,7 @@ function (run_doxygen)
endif (NOT DOXYGEN_DOT_FOUND)
configure_file(
${xgboost_SOURCE_DIR}/doc/Doxyfile.in
${PROJECT_SOURCE_DIR}/doc/Doxyfile.in
${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY)
add_custom_target( doc_doxygen ALL
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile


@@ -1,22 +0,0 @@
function (find_prefetch_intrinsics)
include(CheckCXXSourceCompiles)
check_cxx_source_compiles("
#include <xmmintrin.h>
int main() {
char data = 0;
const char* address = &data;
_mm_prefetch(address, _MM_HINT_NTA);
return 0;
}
" XGBOOST_MM_PREFETCH_PRESENT)
check_cxx_source_compiles("
int main() {
char data = 0;
const char* address = &data;
__builtin_prefetch(address, 0, 0);
return 0;
}
" XGBOOST_BUILTIN_PREFETCH_PRESENT)
set(XGBOOST_MM_PREFETCH_PRESENT ${XGBOOST_MM_PREFETCH_PRESENT} PARENT_SCOPE)
set(XGBOOST_BUILTIN_PREFETCH_PRESENT ${XGBOOST_BUILTIN_PREFETCH_PRESENT} PARENT_SCOPE)
endfunction (find_prefetch_intrinsics)


@@ -1 +0,0 @@
@xgboost_VERSION_MAJOR@.@xgboost_VERSION_MINOR@.@xgboost_VERSION_PATCH@


@@ -4,29 +4,24 @@
# enable_sanitizers("address;leak")
# Add flags
macro(enable_sanitizer sanitizer)
if(${sanitizer} MATCHES "address")
macro(enable_sanitizer santizer)
if(${santizer} MATCHES "address")
find_package(ASan REQUIRED)
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=address")
link_libraries(${ASan_LIBRARY})
elseif(${sanitizer} MATCHES "thread")
elseif(${santizer} MATCHES "thread")
find_package(TSan REQUIRED)
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=thread")
link_libraries(${TSan_LIBRARY})
elseif(${sanitizer} MATCHES "leak")
elseif(${santizer} MATCHES "leak")
find_package(LSan REQUIRED)
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=leak")
link_libraries(${LSan_LIBRARY})
elseif(${sanitizer} MATCHES "undefined")
find_package(UBSan REQUIRED)
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=undefined -fno-sanitize-recover=undefined")
link_libraries(${UBSan_LIBRARY})
else()
message(FATAL_ERROR "Santizer ${sanitizer} not supported.")
message(FATAL_ERROR "Santizer ${santizer} not supported.")
endif()
endmacro()


@@ -58,13 +58,9 @@ function(set_output_directory target dir)
RUNTIME_OUTPUT_DIRECTORY ${dir}
RUNTIME_OUTPUT_DIRECTORY_DEBUG ${dir}
RUNTIME_OUTPUT_DIRECTORY_RELEASE ${dir}
RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO ${dir}
RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${dir}
LIBRARY_OUTPUT_DIRECTORY ${dir}
LIBRARY_OUTPUT_DIRECTORY_DEBUG ${dir}
LIBRARY_OUTPUT_DIRECTORY_RELEASE ${dir}
LIBRARY_OUTPUT_DIRECTORY_RELWITHDEBINFO ${dir}
LIBRARY_OUTPUT_DIRECTORY_MINSIZEREL ${dir}
)
endfunction(set_output_directory)
@@ -115,7 +111,7 @@ DESTINATION \"${build_dir}/bak\")")
install(CODE "file(REMOVE_RECURSE \"${build_dir}/R-package\")")
install(
DIRECTORY "${xgboost_SOURCE_DIR}/R-package"
DIRECTORY "${PROJECT_SOURCE_DIR}/R-package"
DESTINATION "${build_dir}"
REGEX "src/*" EXCLUDE
REGEX "R-package/configure" EXCLUDE


@@ -1,9 +0,0 @@
function (write_version)
message(STATUS "xgboost VERSION: ${xgboost_VERSION}")
configure_file(
${xgboost_SOURCE_DIR}/cmake/version_config.h.in
${xgboost_SOURCE_DIR}/include/xgboost/version_config.h @ONLY)
configure_file(
${xgboost_SOURCE_DIR}/cmake/Python_version.in
${xgboost_SOURCE_DIR}/python-package/xgboost/VERSION @ONLY)
endfunction (write_version)


@@ -1,7 +1,7 @@
set(ASan_LIB_NAME ASan)
find_library(ASan_LIBRARY
NAMES libasan.so libasan.so.5 libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0
NAMES libasan.so libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
include(FindPackageHandleStandardArgs)


@@ -1,23 +0,0 @@
if (NVML_LIBRARY)
unset(NVML_LIBRARY CACHE)
endif(NVML_LIBRARY)
set(NVML_LIB_NAME nvml)
find_path(NVML_INCLUDE_DIR
NAMES nvml.h
PATHS ${CUDA_HOME}/include ${CUDA_INCLUDE} /usr/local/cuda/include)
find_library(NVML_LIBRARY
NAMES nvidia-ml)
message(STATUS "Using nvml library: ${NVML_LIBRARY}")
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(NVML DEFAULT_MSG
NVML_INCLUDE_DIR NVML_LIBRARY)
mark_as_advanced(
NVML_INCLUDE_DIR
NVML_LIBRARY
)


@@ -1,13 +0,0 @@
set(UBSan_LIB_NAME UBSan)
find_library(UBSan_LIBRARY
NAMES libubsan.so libubsan.so.5 libubsan.so.4 libubsan.so.3 libubsan.so.2 libubsan.so.1 libubsan.so.0
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(UBSan DEFAULT_MSG
UBSan_LIBRARY)
mark_as_advanced(
UBSan_LIBRARY
UBSan_LIB_NAME)


@@ -1,11 +0,0 @@
/*!
* Copyright 2019 XGBoost contributors
*/
#ifndef XGBOOST_VERSION_CONFIG_H_
#define XGBOOST_VERSION_CONFIG_H_
#define XGBOOST_VER_MAJOR @xgboost_VERSION_MAJOR@
#define XGBOOST_VER_MINOR @xgboost_VERSION_MINOR@
#define XGBOOST_VER_PATCH @xgboost_VERSION_PATCH@
#endif // XGBOOST_VERSION_CONFIG_H_


@@ -1,4 +0,0 @@
cmake_minimum_required(VERSION 3.12)
find_package(xgboost REQUIRED)
add_executable(api-demo c-api-demo.c)
target_link_libraries(api-demo xgboost::xgboost)


@@ -36,12 +36,13 @@ int main(int argc, char** argv) {
// https://xgboost.readthedocs.io/en/latest/parameter.html
safe_xgboost(XGBoosterSetParam(booster, "tree_method", use_gpu ? "gpu_hist" : "hist"));
if (use_gpu) {
// set the GPU to use;
// set the number of GPUs and the first GPU to use;
// this is not necessary, but provided here as an illustration
safe_xgboost(XGBoosterSetParam(booster, "n_gpus", "1"));
safe_xgboost(XGBoosterSetParam(booster, "gpu_id", "0"));
} else {
// avoid evaluating objective and metric on a GPU
safe_xgboost(XGBoosterSetParam(booster, "gpu_id", "-1"));
safe_xgboost(XGBoosterSetParam(booster, "n_gpus", "0"));
}
safe_xgboost(XGBoosterSetParam(booster, "objective", "binary:logistic"));
@@ -65,7 +66,7 @@ int main(int argc, char** argv) {
const float* out_result = NULL;
int n_print = 10;
safe_xgboost(XGBoosterPredict(booster, dtest, 0, 0, 0, &out_len, &out_result));
safe_xgboost(XGBoosterPredict(booster, dtest, 0, 0, &out_len, &out_result));
printf("y_pred: ");
for (int i = 0; i < n_print; ++i) {
printf("%1.4f ", out_result[i]);
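For reference, a sketch of the same device switch from the Python API, following the newer convention shown above where a single `gpu_id` (rather than `n_gpus`) selects the device; the data here is made up:

```python
import numpy as np
import xgboost as xgb

X, y = np.random.rand(200, 5), np.random.randint(0, 2, 200)
dtrain = xgb.DMatrix(X, label=y)

use_gpu = False  # flip to True on a CUDA-enabled build
params = {'objective': 'binary:logistic',
          'tree_method': 'gpu_hist' if use_gpu else 'hist'}
if use_gpu:
    params['gpu_id'] = 0  # pick the first device
bst = xgb.train(params, dtrain, num_boost_round=10)
```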


@@ -1,6 +0,0 @@
Dask
====
This directory contains some demonstrations for using `dask` with `XGBoost`.
For an overview, see
https://xgboost.readthedocs.io/en/latest/tutorials/dask.html .


@@ -1,42 +0,0 @@
import xgboost as xgb
from xgboost.dask import DaskDMatrix
from dask.distributed import Client
from dask.distributed import LocalCluster
from dask import array as da
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
X = da.random.random(size=(m, n), chunks=100)
y = da.random.random(size=(m, ), chunks=100)
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
# DMatrix scatter around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
output = xgb.dask.train(client,
{'verbosity': 1,
'nthread': 1,
'tree_method': 'hist'},
dtrain,
num_boost_round=4, evals=[(dtrain, 'train')])
bst = output['booster']
history = output['history']
# you can pass output directly into `predict` too.
prediction = xgb.dask.predict(client, bst, dtrain)
print('Evaluation history:', history)
return prediction
if __name__ == '__main__':
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)


@@ -1,46 +0,0 @@
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
from dask import array as da
import xgboost as xgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
X = da.random.random(size=(m, n), chunks=100)
y = da.random.random(size=(m, ), chunks=100)
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
# DMatrix scatter around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
output = xgb.dask.train(client,
{'verbosity': 2,
'nthread': 1,
# Golden line for GPU training
'tree_method': 'gpu_hist'},
dtrain,
num_boost_round=4, evals=[(dtrain, 'train')])
bst = output['booster']
history = output['history']
# you can pass output directly into `predict` too.
prediction = xgb.dask.predict(client, bst, dtrain)
prediction = prediction.compute()
print('Evaluation history:', history)
return prediction
if __name__ == '__main__':
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker
# process.
with LocalCUDACluster(n_workers=2, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)


@@ -1,39 +0,0 @@
'''Dask interface demo:
Use scikit-learn regressor interface with CPU histogram tree method.'''
from dask.distributed import Client
from dask.distributed import LocalCluster
from dask import array as da
import xgboost
def main(client):
# generate some random data for demonstration
n = 100
m = 10000
partition_size = 100
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = xgboost.dask.DaskXGBRegressor(verbosity=1, n_estimators=2)
regressor.set_params(tree_method='hist')
# assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print('Evaluation history:', history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
return bst # returning the trained model
if __name__ == '__main__':
# or use other clusters for scaling
with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)


@@ -1,42 +0,0 @@
'''Dask interface demo:
Use scikit-learn regressor interface with GPU histogram tree method.'''
from dask.distributed import Client
# It's recommended to use dask_cuda for GPU assignment
from dask_cuda import LocalCUDACluster
from dask import array as da
import xgboost
def main(client):
# generate some random data for demonstration
n = 100
m = 1000000
partition_size = 10000
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = xgboost.dask.DaskXGBRegressor(verbosity=1)
regressor.set_params(tree_method='gpu_hist')
# assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print('Evaluation history:', history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
return bst # returning the trained model
if __name__ == '__main__':
# With dask cuda, one can scale up XGBoost to arbitrary GPU clusters.
# `LocalCUDACluster` used here is only for demonstration purpose.
with LocalCUDACluster() as cluster:
with Client(cluster) as client:
main(client)


@@ -1,5 +1,7 @@
# GPU Acceleration Demo
`cover_type.py` shows how to train a model on the [forest cover type](https://archive.ics.uci.edu/ml/datasets/covertype) dataset using GPU acceleration. The forest cover type dataset has 581,012 rows and 54 features, making it time consuming to process. We compare the run-time and accuracy of the GPU and CPU histogram algorithms.
This demo shows how to train a model on the [forest cover type](https://archive.ics.uci.edu/ml/datasets/covertype) dataset using GPU acceleration. The forest cover type dataset has 581,012 rows and 54 features, making it time consuming to process. We compare the run-time and accuracy of the GPU and CPU histogram algorithms.
`memory.py` shows how to repeatedly train xgboost models while freeing memory between iterations.
This demo requires the [GPU plug-in](https://xgboost.readthedocs.io/en/latest/gpu/index.html) to be built and installed.
The dataset is automatically loaded via the sklearn script.
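A condensed sketch of what `cover_type.py` does — not a verbatim excerpt — assuming a CUDA-enabled build and a recent scikit-learn for the dataset download:

```python
import xgboost as xgb
from sklearn.datasets import fetch_covtype
from sklearn.model_selection import train_test_split

X, y = fetch_covtype(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
                                                    random_state=7)
dtrain = xgb.DMatrix(X_train, label=y_train - 1)  # labels must start at 0
dtest = xgb.DMatrix(X_test, label=y_test - 1)

params = {'objective': 'multi:softmax', 'num_class': 7,
          'tree_method': 'gpu_hist'}  # switch to 'hist' for the CPU comparison
bst = xgb.train(params, dtrain, num_boost_round=20, evals=[(dtest, 'test')])
```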


@@ -1,51 +0,0 @@
import xgboost as xgb
import numpy as np
import time
import pickle
import GPUtil
n = 10000
m = 1000
X = np.random.random((n, m))
y = np.random.random(n)
param = {'objective': 'binary:logistic',
'tree_method': 'gpu_hist'
}
iterations = 5
dtrain = xgb.DMatrix(X, label=y)
# High memory usage
# active bst objects with device memory persist across iterations
boosters = []
for i in range(iterations):
bst = xgb.train(param, dtrain)
boosters.append(bst)
print("Example 1")
GPUtil.showUtilization()
del boosters
# Better memory usage
# The bst object can be destroyed by the python gc, freeing device memory
# The gc may not immediately free the object, so more than one booster can be allocated at a time
boosters = []
for i in range(iterations):
bst = xgb.train(param, dtrain)
boosters.append(pickle.dumps(bst))
print("Example 2")
GPUtil.showUtilization()
del boosters
# Best memory usage
# The gc explicitly frees the booster before starting the next iteration
boosters = []
for i in range(iterations):
bst = xgb.train(param, dtrain)
boosters.append(pickle.dumps(bst))
del bst
print("Example 3")
GPUtil.showUtilization()
del boosters


@@ -2,7 +2,6 @@ XGBoost Python Feature Walkthrough
==================================
* [Basic walkthrough of wrappers](basic_walkthrough.py)
* [Customize loss function, and evaluation metric](custom_objective.py)
* [Re-implement RMSLE as customized metric and objective](custom_rmsle.py)
* [Boosting from existing prediction](boost_from_prediction.py)
* [Predicting using first n trees](predict_first_ntree.py)
* [Generalized Linear Model](generalized_linear_model.py)


@@ -1,4 +1,5 @@
#!/usr/bin/python
import numpy as np
import xgboost as xgb
dtrain = xgb.DMatrix('../data/agaricus.txt.train')
@@ -7,19 +8,18 @@ watchlist = [(dtest, 'eval'), (dtrain, 'train')]
###
# advanced: start from a initial base prediction
#
print('start running example to start from a initial prediction')
print ('start running example to start from a initial prediction')
# specify parameters via map, definition are same as c++ version
param = {'max_depth': 2, 'eta': 1, 'silent': 1, 'objective': 'binary:logistic'}
param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic'}
# train xgboost for 1 round
bst = xgb.train(param, dtrain, 1, watchlist)
# Note: we need the margin value instead of transformed prediction in
# set_base_margin
# do predict with output_margin=True, will always give you margin values
# before logistic transformation
# Note: we need the margin value instead of transformed prediction in set_base_margin
# do predict with output_margin=True, will always give you margin values before logistic transformation
ptrain = bst.predict(dtrain, output_margin=True)
ptest = bst.predict(dtest, output_margin=True)
dtrain.set_base_margin(ptrain)
dtest.set_base_margin(ptest)
print('this is result of running from initial prediction')
bst = xgb.train(param, dtrain, 1, watchlist)
bst = xgb.train(param, dtrain, 1, watchlist)


@@ -16,9 +16,8 @@ param = {'max_depth': 2, 'eta': 1, 'silent': 1}
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 2
# user define objective function, given prediction, return gradient and second
# order gradient this is log likelihood loss
# user define objective function, given prediction, return gradient and second order gradient
# this is log likelihood loss
def logregobj(preds, dtrain):
labels = dtrain.get_label()
preds = 1.0 / (1.0 + np.exp(-preds))
@@ -26,24 +25,18 @@ def logregobj(preds, dtrain):
hess = preds * (1.0 - preds)
return grad, hess
# user defined evaluation function, return a pair metric_name, result
# NOTE: when you do customized loss function, the default prediction value is
# margin. this may make builtin evaluation metric not function properly for
# example, we are doing logistic loss, the prediction is score before logistic
# transformation the builtin evaluation error assumes input is after logistic
# transformation Take this in mind when you use the customization, and maybe
# you need write customized evaluation function
# NOTE: when you do customized loss function, the default prediction value is margin
# this may make builtin evaluation metric not function properly
# for example, we are doing logistic loss, the prediction is score before logistic transformation
# the builtin evaluation error assumes input is after logistic transformation
# Take this in mind when you use the customization, and maybe you need write customized evaluation function
def evalerror(preds, dtrain):
labels = dtrain.get_label()
# return a pair metric_name, result. The metric name must not contain a
# colon (:) or a space since preds are margin(before logistic
# transformation, cutoff at 0)
# return a pair metric_name, result. The metric name must not contain a colon (:) or a space
# since preds are margin(before logistic transformation, cutoff at 0)
return 'my-error', float(sum(labels != (preds > 0.0))) / len(labels)
# training with customized objective, we can also do step by step training
# simply look at xgboost.py's implementation of train
bst = xgb.train(param, dtrain, num_round, watchlist, obj=logregobj,
feval=evalerror)
bst = xgb.train(param, dtrain, num_round, watchlist, obj=logregobj, feval=evalerror)


@@ -1,179 +0,0 @@
'''Demo for defining a customized metric and objective. Notice that for
simplicity, weight is not used in the following example. In this
script, we implement the Squared Log Error (SLE) objective and RMSLE metric
as customized functions, then compare them with the native implementation in XGBoost.
See doc/tutorials/custom_metric_obj.rst for a step by step
walkthrough, with other details.
The `SLE` objective reduces impact of outliers in training dataset,
hence here we also compare its performance with standard squared
error.
'''
import numpy as np
import xgboost as xgb
from typing import Tuple, Dict, List
from time import time
import matplotlib
from matplotlib import pyplot as plt
# shape of generated data.
kRows = 4096
kCols = 16
kOutlier = 10000 # mean of generated outliers
kNumberOfOutliers = 64
kRatio = 0.7
kSeed = 1994
kBoostRound = 20
np.random.seed(seed=kSeed)
def generate_data() -> Tuple[xgb.DMatrix, xgb.DMatrix]:
'''Generate data containing outliers.'''
x = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
y += np.abs(np.min(y))
# Create outliers
for i in range(0, kNumberOfOutliers):
ind = np.random.randint(0, len(y)-1)
y[ind] += np.random.randint(0, kOutlier)
train_portion = int(kRows * kRatio)
# rmsle requires all label be greater than -1.
assert np.all(y > -1.0)
train_x: np.ndarray = x[: train_portion]
train_y: np.ndarray = y[: train_portion]
dtrain = xgb.DMatrix(train_x, label=train_y)
test_x = x[train_portion:]
test_y = y[train_portion:]
dtest = xgb.DMatrix(test_x, label=test_y)
return dtrain, dtest
def native_rmse(dtrain: xgb.DMatrix,
dtest: xgb.DMatrix) -> Dict[str, Dict[str, List[float]]]:
'''Train using native implementation of Root Mean Squared Loss.'''
print('Squared Error')
squared_error = {
'objective': 'reg:squarederror',
'eval_metric': 'rmse',
'tree_method': 'hist',
'seed': kSeed
}
start = time()
results: Dict[str, Dict[str, List[float]]] = {}
xgb.train(squared_error,
dtrain=dtrain,
num_boost_round=kBoostRound,
evals=[(dtrain, 'dtrain'), (dtest, 'dtest')],
evals_result=results)
print('Finished Squared Error in:', time() - start, '\n')
return results
def native_rmsle(dtrain: xgb.DMatrix,
dtest: xgb.DMatrix) -> Dict[str, Dict[str, List[float]]]:
'''Train using native implementation of Squared Log Error.'''
print('Squared Log Error')
results: Dict[str, Dict[str, List[float]]] = {}
squared_log_error = {
'objective': 'reg:squaredlogerror',
'eval_metric': 'rmsle',
'tree_method': 'hist',
'seed': kSeed
}
start = time()
xgb.train(squared_log_error,
dtrain=dtrain,
num_boost_round=kBoostRound,
evals=[(dtrain, 'dtrain'), (dtest, 'dtest')],
evals_result=results)
print('Finished Squared Log Error in:', time() - start)
return results
def py_rmsle(dtrain: xgb.DMatrix, dtest: xgb.DMatrix) -> Dict:
'''Train using Python implementation of Squared Log Error.'''
def gradient(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
'''Compute the gradient squared log error.'''
y = dtrain.get_label()
return (np.log1p(predt) - np.log1p(y)) / (predt + 1)
def hessian(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
'''Compute the hessian for squared log error.'''
y = dtrain.get_label()
return ((-np.log1p(predt) + np.log1p(y) + 1) /
np.power(predt + 1, 2))
def squared_log(predt: np.ndarray,
dtrain: xgb.DMatrix) -> Tuple[np.ndarray, np.ndarray]:
'''Squared Log Error objective. A simplified version for RMSLE used as
objective function.
:math:`\frac{1}{2}[log(pred + 1) - log(label + 1)]^2`
'''
predt[predt < -1] = -1 + 1e-6
grad = gradient(predt, dtrain)
hess = hessian(predt, dtrain)
return grad, hess
def rmsle(predt: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, float]:
''' Root mean squared log error metric.
:math:`\sqrt{\frac{1}{N}[log(pred + 1) - log(label + 1)]^2}`
'''
y = dtrain.get_label()
predt[predt < -1] = -1 + 1e-6
elements = np.power(np.log1p(y) - np.log1p(predt), 2)
return 'PyRMSLE', float(np.sqrt(np.sum(elements) / len(y)))
results: Dict[str, Dict[str, List[float]]] = {}
xgb.train({'tree_method': 'hist', 'seed': kSeed,
'disable_default_eval_metric': 1},
dtrain=dtrain,
num_boost_round=kBoostRound,
obj=squared_log,
feval=rmsle,
evals=[(dtrain, 'dtrain'), (dtest, 'dtest')],
evals_result=results)
return results
if __name__ == '__main__':
dtrain, dtest = generate_data()
rmse_evals = native_rmse(dtrain, dtest)
rmsle_evals = native_rmsle(dtrain, dtest)
py_rmsle_evals = py_rmsle(dtrain, dtest)
fig, axs = plt.subplots(3, 1)
ax0: matplotlib.axes.Axes = axs[0]
ax1: matplotlib.axes.Axes = axs[1]
ax2: matplotlib.axes.Axes = axs[2]
x = np.arange(0, kBoostRound, 1)
ax0.plot(x, rmse_evals['dtrain']['rmse'], label='train-RMSE')
ax0.plot(x, rmse_evals['dtest']['rmse'], label='test-RMSE')
ax0.legend()
ax1.plot(x, rmsle_evals['dtrain']['rmsle'], label='train-native-RMSLE')
ax1.plot(x, rmsle_evals['dtest']['rmsle'], label='test-native-RMSLE')
ax1.legend()
ax2.plot(x, py_rmsle_evals['dtrain']['PyRMSLE'], label='train-PyRMSLE')
ax2.plot(x, py_rmsle_evals['dtest']['PyRMSLE'], label='test-PyRMSLE')
ax2.legend()
plt.show()
plt.close()


@@ -1,3 +0,0 @@
We introduced initial support for saving XGBoost models in JSON format in 1.0.0. Note that
it is still experimental and under development; the output schema is subject to change due to
bug fixes or further refactoring. For an overview, see https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html .
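A minimal sketch of the JSON round trip, assuming the 1.0-era behaviour where the file extension selects the serialization format:

```python
import numpy as np
import xgboost as xgb

X, y = np.random.rand(50, 4), np.random.rand(50)
bst = xgb.train({}, xgb.DMatrix(X, label=y), num_boost_round=2)

bst.save_model('model.json')  # JSON is chosen because of the .json suffix
loaded = xgb.Booster(model_file='model.json')
```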


@@ -1,180 +0,0 @@
'''Demonstration for parsing JSON tree model file generated by XGBoost. The
support is experimental, output schema is subject to change in the future.
'''
import json
import argparse
class Tree:
'''A tree built by XGBoost.'''
# Index into node array
_left = 0
_right = 1
_parent = 2
_ind = 3
_cond = 4
_default_left = 5
# Index into stat array
_loss_chg = 0
_sum_hess = 1
_base_weight = 2
_child_cnt = 3
def __init__(self, tree_id: int, nodes, stats):
self.tree_id = tree_id
self.nodes = nodes
self.stats = stats
def loss_change(self, node_id: int):
'''Loss gain of a node.'''
return self.stats[node_id][self._loss_chg]
def sum_hessian(self, node_id: int):
'''Sum Hessian of a node.'''
return self.stats[node_id][self._sum_hess]
def base_weight(self, node_id: int):
'''Base weight of a node.'''
return self.stats[node_id][self._base_weight]
def num_children(self, node_id: int):
'''Number of children of a node.'''
return self.stats[node_id][self._child_cnt]
def split_index(self, node_id: int):
'''Split feature index of node.'''
return self.nodes[node_id][self._ind]
def split_condition(self, node_id: int):
'''Split value of a node.'''
return self.nodes[node_id][self._cond]
def parent(self, node_id: int):
'''Parent ID of a node.'''
return self.nodes[node_id][self._parent]
def left_child(self, node_id: int):
'''Left child ID of a node.'''
return self.nodes[node_id][self._left]
def right_child(self, node_id: int):
'''Right child ID of a node.'''
return self.nodes[node_id][self._right]
def is_leaf(self, node_id: int):
'''Whether a node is leaf.'''
return self.nodes[node_id][self._left] == -1
def is_deleted(self, node_id: int):
'''Whether a node is deleted.'''
# std::numeric_limits<uint32_t>::max()
return self.nodes[node_id][self._ind] == 4294967295
def __str__(self):
stacks = [0]
nodes = []
while stacks:
node = {}
nid = stacks.pop()
node['node id'] = nid
node['gain'] = self.loss_change(nid)
node['cover'] = self.sum_hessian(nid)
nodes.append(node)
if not self.is_leaf(nid) and not self.is_deleted(nid):
left = self.left_child(nid)
right = self.right_child(nid)
stacks.append(left)
stacks.append(right)
string = '\n'.join(map(lambda x: ' ' + str(x), nodes))
return string
class Model:
'''Gradient boosted tree model.'''
def __init__(self, m: dict):
'''Construct the Model from JSON object.
parameters
----------
m: A dictionary loaded by json
'''
# Basic property of a model
self.learner_model_shape = model['learner']['learner_model_param']
self.num_output_group = int(self.learner_model_shape['num_class'])
self.num_feature = int(self.learner_model_shape['num_feature'])
self.base_score = float(self.learner_model_shape['base_score'])
# A field encoding which output group a tree belongs
self.tree_info = model['learner']['gradient_booster']['model'][
'tree_info']
model_shape = model['learner']['gradient_booster']['model'][
'gbtree_model_param']
# JSON representation of trees
j_trees = model['learner']['gradient_booster']['model']['trees']
# Load the trees
self.num_trees = int(model_shape['num_trees'])
self.leaf_size = int(model_shape['size_leaf_vector'])
# Right now XGBoost doesn't support vector leaf yet
assert self.leaf_size == 0, str(self.leaf_size)
trees = []
for i in range(self.num_trees):
tree = j_trees[i]
tree_id = int(tree['id'])
assert tree_id == i, (tree_id, i)
# properties
left_children = tree['left_children']
right_children = tree['right_children']
parents = tree['parents']
split_conditions = tree['split_conditions']
split_indices = tree['split_indices']
default_left = tree['default_left']
# stats
base_weights = tree['base_weights']
loss_changes = tree['loss_changes']
sum_hessian = tree['sum_hessian']
leaf_child_counts = tree['leaf_child_counts']
stats = []
nodes = []
# We mirror the structure used inside XGBoost, which is similar
# to an adjacency list.
for node_id in range(len(left_children)):
nodes.append([
left_children[node_id], right_children[node_id],
parents[node_id], split_indices[node_id],
split_conditions[node_id], default_left[node_id]
])
stats.append([
loss_changes[node_id], sum_hessian[node_id],
base_weights[node_id], leaf_child_counts[node_id]
])
tree = Tree(tree_id, nodes, stats)
trees.append(tree)
self.trees = trees
def print_model(self):
for i, tree in enumerate(self.trees):
print('tree_id:', i)
print(tree)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Demonstration for loading and printing XGBoost model.')
parser.add_argument('--model',
type=str,
required=True,
help='Path to JSON model file.')
args = parser.parse_args()
with open(args.model, 'r') as fd:
model = json.load(fd)
model = Model(model)
model.print_model()


@@ -1,6 +1,6 @@
Learning to rank
====
XGBoost supports accomplishing ranking tasks. In ranking scenario, data are often grouped and we need the [group information file](../../doc/tutorials/input_format.rst#group-input-format) to specify ranking tasks. The model used in XGBoost for ranking is the LambdaRank. See [parameters](../../doc/parameter.rst) for supported metrics.
XGBoost supports accomplishing ranking tasks. In ranking scenario, data are often grouped and we need the [group information file](../../doc/tutorials/input_format.rst#group-input-format) to specify ranking tasks. The model used in XGBoost for ranking is the LambdaRank, this function is not yet completed. Currently, we provide pairwise rank.
### Parameters
The configuration setting is similar to the regression and binary classification setting, except user need to specify the objectives:
@@ -28,7 +28,7 @@ Run the example:
```
### Python
There are two ways of doing ranking in python.
There are two ways of doing ranking in python.
Run the example using `xgboost.train`:
```


@@ -34,8 +34,8 @@ test_dmatrix = DMatrix(x_test)
train_dmatrix.set_group(group_train)
valid_dmatrix.set_group(group_valid)
params = {'objective': 'rank:ndcg', 'eta': 0.1, 'gamma': 1.0,
'min_child_weight': 0.1, 'max_depth': 6}
params = {'objective': 'rank:pairwise', 'eta': 0.1, 'gamma': 1.0,
'min_child_weight': 0.1, 'max_depth': 6}
xgb_model = xgb.train(params, train_dmatrix, num_boost_round=4,
evals=[(valid_dmatrix, 'validation')])
evals=[(valid_dmatrix, 'validation')])
pred = xgb_model.predict(test_dmatrix)


@@ -2,6 +2,7 @@
import xgboost as xgb
from sklearn.datasets import load_svmlight_file
# This script demonstrate how to do ranking with XGBRanker
x_train, y_train = load_svmlight_file("mq2008.train")
x_valid, y_valid = load_svmlight_file("mq2008.vali")
@@ -25,10 +26,10 @@ with open("mq2008.test.group", "r") as f:
for line in data:
group_test.append(int(line.split("\n")[0]))
params = {'objective': 'rank:ndcg', 'learning_rate': 0.1,
params = {'objective': 'rank:pairwise', 'learning_rate': 0.1,
'gamma': 1.0, 'min_child_weight': 0.1,
'max_depth': 6, 'n_estimators': 4}
model = xgb.sklearn.XGBRanker(**params)
model.fit(x_train, y_train, group_train, verbose=True,
model.fit(x_train, y_train, group_train,
eval_set=[(x_valid, y_valid)], eval_group=[group_valid])
pred = model.predict(x_test)


@@ -44,7 +44,7 @@ drat:::addRepo("dmlc")
install.packages("xgboost", repos="http://dmlc.ml/drat/", type = "source")
```
> *Windows* users will need to install [Rtools](http://cran.r-project.org/bin/windows/Rtools/) first.
> *Windows* user will need to install [Rtools](http://cran.r-project.org/bin/windows/Rtools/) first.
### CRAN version
@@ -97,7 +97,7 @@ train <- agaricus.train
test <- agaricus.test
```
> In the real world, it would be up to you to make this division between `train` and `test` data. The way to do it is out of scope for this article, however `caret` package may [help](http://topepo.github.io/caret/data-splitting.html).
> In the real world, it would be up to you to make this division between `train` and `test` data. The way to do it is out of the purpose of this article, however `caret` package may [help](http://topepo.github.io/caret/splitting.html).
Each variable is a `list` containing two things, `label` and `data`:
@@ -141,7 +141,7 @@ dim(test$data)
## [1] 1611 126
```
This dataset is very small to not make the **R** package too heavy, however **XGBoost** is built to manage huge datasets very efficiently.
This dataset is very small to not make the **R** package too heavy, however **XGBoost** is built to manage huge dataset very efficiently.
As seen below, the `data` are stored in a `dgCMatrix` which is a *sparse* matrix and `label` vector is a `numeric` vector (`{0,1}`):
@@ -171,7 +171,7 @@ This step is the most critical part of the process for the quality of our model.
We are using the `train` data. As explained above, both `data` and `label` are stored in a `list`.
In a *sparse* matrix, cells containing `0` are not stored in memory. Therefore, in a dataset mainly made of `0`, memory size is reduced. It is very common to have such a dataset.
In a *sparse* matrix, cells containing `0` are not stored in memory. Therefore, in a dataset mainly made of `0`, memory size is reduced. It is very usual to have such dataset.
We will train decision tree model using the following parameters:
@@ -190,7 +190,7 @@ bstSparse <- xgboost(data = train$data, label = train$label, max.depth = 2, eta
## [1] train-error:0.022263
```
> The more complex the relationship between your features and your `label` is, the more passes you need.
> More complex the relationship between your features and your `label` is, more passes you need.
#### Parameter variations
@@ -210,7 +210,7 @@ bstDense <- xgboost(data = as.matrix(train$data), label = train$label, max.depth
##### xgb.DMatrix
**XGBoost** offers a way to group them in a `xgb.DMatrix`. You can even add other meta data in it. This will be useful for the most advanced features we will discover later.
**XGBoost** offers a way to group them in a `xgb.DMatrix`. You can even add other meta data in it. It will be useful for the most advanced features we will discover later.
```r
@@ -225,9 +225,9 @@ bstDMatrix <- xgboost(data = dtrain, max.depth = 2, eta = 1, nthread = 2, nround
##### Verbose option
**XGBoost** has several features to help you view the learning progress internally. The purpose is to help you to set the best parameters, which is the key of your model quality.
**XGBoost** has several features to help you to view how the learning progress internally. The purpose is to help you to set the best parameters, which is the key of your model quality.
One of the simplest way to see the training progress is to set the `verbose` option (see below for more advanced techniques).
One of the simplest way to see the training progress is to set the `verbose` option (see below for more advanced technics).
```r
@@ -360,11 +360,11 @@ dtest <- xgb.DMatrix(data = test$data, label=test$label)
Both `xgboost` (simple) and `xgb.train` (advanced) functions train models.
One of the special features of `xgb.train` is the capacity to follow the progress of the learning after each round. Because of the way boosting works, there is a time when having too many rounds lead to overfitting. You can see this feature as a cousin of a cross-validation method. The following techniques will help you to avoid overfitting or optimizing the learning time in stopping it as soon as possible.
One of the special feature of `xgb.train` is the capacity to follow the progress of the learning after each round. Because of the way boosting works, there is a time when having too many rounds lead to an overfitting. You can see this feature as a cousin of cross-validation method. The following techniques will help you to avoid overfitting or optimizing the learning time in stopping it as soon as possible.
One way to measure progress in the learning of a model is to provide to **XGBoost** a second dataset already classified. Therefore it can learn on the first dataset and test its model on the second one. Some metrics are measured after each round during the learning.
One way to measure progress in learning of a model is to provide to **XGBoost** a second dataset already classified. Therefore it can learn on the first dataset and test its model on the second one. Some metrics are measured after each round during the learning.
> in some way it is similar to what we have done above with the average error. The main difference is that above it was after building the model, and now it is during the construction that we measure errors.
> in some way it is similar to what we have done above with the average error. The main difference is that below it was after building the model, and now it is during the construction that we measure errors.
For the purpose of this example, we use `watchlist` parameter. It is a list of `xgb.DMatrix`, each of them tagged with a name.
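For comparison with the R `watchlist` described above, the Python interface exposes the same mechanism through the `evals` and `evals_result` arguments; the data below is hypothetical:

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(200, 5)
y = (np.random.rand(200) > 0.5).astype(int)
dtrain = xgb.DMatrix(X[:150], label=y[:150])
dtest = xgb.DMatrix(X[150:], label=y[150:])

history = {}  # filled with one entry per round and per watched dataset
bst = xgb.train({'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'},
                dtrain, num_boost_round=2,
                evals=[(dtrain, 'train'), (dtest, 'test')],
                evals_result=history)
print(history['test']['error'])
```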
@@ -380,11 +380,11 @@ bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nthread = 2, nrounds=2, watchl
## [1] train-error:0.022263 test-error:0.021726
```
**XGBoost** has computed at each round the same average error metric seen above (we set `nrounds` to 2, that is why we have two lines). Obviously, the `train-error` number is related to the training dataset (the one the algorithm learns from) and the `test-error` number to the test dataset.
**XGBoost** has computed at each round the same average error metric than seen above (we set `nrounds` to 2, that is why we have two lines). Obviously, the `train-error` number is related to the training dataset (the one the algorithm learns from) and the `test-error` number to the test dataset.
Both training and test error related metrics are very similar, and in some way, it makes sense: what we have learned from the training dataset matches the observations from the test dataset.
If with your own dataset you do not have such results, you should think about how you divided your dataset in training and test. May be there is something to fix. Again, `caret` package may [help](http://topepo.github.io/caret/data-splitting.html).
If with your own dataset you have not such results, you should think about how you divided your dataset in training and test. May be there is something to fix. Again, `caret` package may [help](http://topepo.github.io/caret/splitting.html).
For a better understanding of the learning progression, you may want to have some specific metric or even use multiple evaluation metrics.
@@ -403,7 +403,7 @@ bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nthread = 2, nrounds=2, watchl
### Linear boosting
Until now, all the learnings we have performed were based on boosting trees. **XGBoost** implements a second algorithm, based on linear boosting. The only difference with the previous command is `booster = "gblinear"` parameter (and removing `eta` parameter).
Until now, all the learnings we have performed were based on boosting trees. **XGBoost** implements a second algorithm, based on linear boosting. The only difference with previous command is `booster = "gblinear"` parameter (and removing `eta` parameter).
```r
@@ -415,9 +415,9 @@ bst <- xgb.train(data=dtrain, booster = "gblinear", max.depth=2, nthread = 2, nr
## [1] train-error:0.004146 train-logloss:0.069885 test-error:0.003724 test-logloss:0.068081
```
In this specific case, *linear boosting* gets slightly better performance metrics than a decision tree based algorithm.
In this specific case, *linear boosting* gets slightly better performance metrics than decision trees based algorithm.
In simple cases, this will happen because there is nothing better than a linear algorithm to catch a linear link. However, decision trees are much better to catch a non linear link between predictors and outcome. Because there is no silver bullet, we advise you to check both algorithms with your own datasets to have an idea of what to use.
In simple cases, it will happen because there is nothing better than a linear algorithm to catch a linear link. However, decision trees are much better to catch a non linear link between predictors and outcome. Because there is no silver bullet, we advise you to check both algorithms with your own datasets to have an idea of what to use.
### Manipulating xgb.DMatrix
@@ -457,7 +457,7 @@ bst <- xgb.train(data=dtrain2, max.depth=2, eta=1, nthread = 2, nrounds=2, watch
#### Information extraction
Information can be extracted from an `xgb.DMatrix` using `getinfo` function. Hereafter we will extract `label` data.
Information can be extracted from `xgb.DMatrix` using `getinfo` function. Hereafter we will extract `label` data.
```r
@@ -489,7 +489,7 @@ You can dump the tree you learned using `xgb.dump` into a text file.
```r
xgb.dump(bst, with_stats = T)
xgb.dump(bst, with.stats = T)
```
```
@@ -522,7 +522,7 @@ xgb.plot.tree(model = bst)
Maybe your dataset is big, and it takes time to train a model on it? May be you are not a big fan of losing time in redoing the same task again and again? In these very rare cases, you will want to save your model and load it when required.
Helpfully for you, **XGBoost** implements such functions.
Hopefully for you, **XGBoost** implements such functions.
```r


@@ -13,9 +13,8 @@ Installation Guide
# * xgboost-{version}-py2.py3-none-win_amd64.whl
pip3 install xgboost
* The binary wheel will support GPU algorithms (`gpu_hist`) on machines with NVIDIA GPUs. Please note that **training with multiple GPUs is only supported for Linux platform**. See :doc:`gpu/index`.
* The binary wheel will support GPU algorithms (`gpu_exact`, `gpu_hist`) on machines with NVIDIA GPUs. Please note that **training with multiple GPUs is only supported for Linux platform**. See :doc:`gpu/index`.
* Currently, we provide binary wheels for 64-bit Linux and Windows.
* Nightly builds are available. You can now run *pip install https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/xgboost-[version]+[commit hash]-py2.py3-none-manylinux1_x86_64.whl* to install the nightly build with the given commit hash. See `this page <https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/list.html>`_ to see the list of all nightly builds.
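After installing a wheel — release or nightly — a quick sanity check from Python confirms which build is active:

```python
import xgboost
print(xgboost.__version__)  # prints the installed wheel's version string
```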
****************************
Building XGBoost from source
@@ -58,9 +57,6 @@ to ask questions at `the user forum <https://discuss.xgboost.ai>`_.
* `Python Package Installation`_
* `R Package Installation`_
* `Trouble Shooting`_
* `Building the documentation`_
.. _build_shared_lib:
***************************
Building the Shared Library
@@ -73,10 +69,8 @@ Our goal is to build the shared library:
The minimal building requirement is
- A recent C++ compiler supporting C++11 (g++-5.0 or higher)
- CMake 3.3 or higher (3.12 for building with CUDA)
For a list of CMake options, see ``#-- Options`` in CMakeLists.txt on top of source tree.
- A recent C++ compiler supporting C++11 (g++-4.8 or higher)
- CMake 3.2 or higher
Building on Ubuntu/Debian
=========================
@@ -98,11 +92,11 @@ Building on OSX
Install with pip: simple method
--------------------------------
First, obtain the OpenMP library (``libomp``) with Homebrew (https://brew.sh/) to enable multi-threading (i.e. using multiple CPU threads for training):
First, obtain ``gcc-8`` with Homebrew (https://brew.sh/) to enable multi-threading (i.e. using multiple CPU threads for training). The default Apple Clang compiler does not support OpenMP, so using the default compiler would have disabled multi-threading.
.. code-block:: bash
brew install libomp
brew install gcc@8
Then install XGBoost with ``pip``:
@@ -115,12 +109,11 @@ You might need to run the command with ``--user`` flag if you run into permissio
Build from the source code - advanced method
--------------------------------------------
Obtain ``libomp`` from Homebrew:
Obtain ``gcc-8`` from Homebrew:
.. code-block:: bash
brew install libomp
brew install gcc@8
Now clone the repository:
@@ -128,13 +121,13 @@ Now clone the repository:
git clone --recursive https://github.com/dmlc/xgboost
Create the ``build/`` directory and invoke CMake. After invoking CMake, you can build XGBoost with ``make``:
Create the ``build/`` directory and invoke CMake. Make sure to add ``CC=gcc-8 CXX=g++-8`` so that Homebrew GCC is selected. After invoking CMake, you can build XGBoost with ``make``:
.. code-block:: bash
mkdir build
cd build
cmake ..
CC=gcc-8 CXX=g++-8 cmake ..
make -j4
You may now continue to `Python Package Installation`_.
@@ -192,7 +185,9 @@ Building with GPU support
=========================
XGBoost can be built with GPU support for both Linux and Windows using CMake. GPU support works with the Python package as well as the CLI version. See `Installing R package with GPU support`_ for special instructions for R.
An up-to-date version of the CUDA toolkit is required.
An up-to-date version of the CUDA toolkit is required. Please note that we
skipped the support for compiling XGBoost with NVCC 10.1 due a small bug in its
spliter, see `#4264 <https://github.com/dmlc/xgboost/issues/4264>`_.
From the command line on Linux starting from the XGBoost directory:
@@ -203,9 +198,9 @@ From the command line on Linux starting from the XGBoost directory:
cmake .. -DUSE_CUDA=ON
make -j4
.. note:: Enabling distributed GPU training
.. note:: Enabling multi-GPU training
By default, distributed GPU training is disabled and only a single GPU will be used. To enable distributed GPU training, set the option ``USE_NCCL=ON``. Distributed GPU training depends on NCCL2, available at `this link <https://developer.nvidia.com/nccl>`_. Since NCCL2 is only available for Linux machines, **distributed GPU training is available only for Linux**.
By default, multi-GPU training is disabled and only a single GPU will be used. To enable multi-GPU training, set the option ``USE_NCCL=ON``. Multi-GPU training depends on NCCL2, available at `this link <https://developer.nvidia.com/nccl>`_. Since NCCL2 is only available for Linux machines, **multi-GPU training is available only for Linux**.
.. code-block:: bash
@@ -230,7 +225,7 @@ On Windows, run CMake as follows:
.. code-block:: bash
cmake .. -G"Visual Studio 15 2017 Win64" -T v140,cuda=8.0 -DUSE_CUDA=ON
make .. -G"Visual Studio 15 2017 Win64" -T v140,cuda=8.0 -DUSE_CUDA=ON
To speed up compilation, the compute version specific to your GPU could be passed to cmake as, e.g., ``-DGPU_COMPUTE_VER=50``.
The above cmake configuration run will create an ``xgboost.sln`` solution file in the build directory. Build this solution in release mode as a x64 build, either from Visual studio or from command line:
@@ -316,21 +311,13 @@ R Package Installation
Installing pre-packaged version
-------------------------------
You can install XGBoost from CRAN just like any other R package:
You can install xgboost from CRAN just like any other R package:
.. code-block:: R
install.packages("xgboost")
install.packages("xgboost")
.. note:: Using all CPU cores (threads) on Mac OSX
If you are using Mac OSX, you should first install OpenMP library (``libomp``) by running
.. code-block:: bash
brew install libomp
and then run ``install.packages("xgboost")``. Without OpenMP, XGBoost will only use a single CPU core, leading to suboptimal training speed.
For OSX users, the single-threaded version will be installed, so only one thread will be used for training. To enable the use of multiple threads (and utilize the capacity of multi-core CPUs), see the section :ref:`osx_multithread` to install XGBoost from source.
Installing the development version
----------------------------------
@@ -347,14 +334,48 @@ Thus, one has to run git to check out the code first:
cd xgboost
git submodule init
git submodule update
mkdir build
cd build
cmake .. -DR_LIB=ON
make -j4
make install
cd R-package
R CMD INSTALL .
If the last line fails because of the error ``R: command not found``, it means that R was not set up to run from the command line.
In this case, just start R as you would normally do and run the following:
.. code-block:: R
setwd('wherever/you/cloned/it/xgboost/R-package/')
install.packages('.', repos = NULL, type="source")
The package could also be built and installed with CMake (and Visual C++ 2015 on Windows) using instructions from :ref:`r_gpu_support`, but without GPU support (omit the ``-DUSE_CUDA=ON`` cmake parameter).
If all fails, try `Building the shared library`_ to see whether the problem is specific to the R package or not.
.. _osx_multithread:
Installing R package on Mac OSX with multi-threading
----------------------------------------------------
First, obtain ``gcc-8`` with Homebrew (https://brew.sh/) to enable multi-threading (i.e. using multiple CPU threads for training). The default Apple Clang compiler does not support OpenMP, so using the default compiler would have disabled multi-threading.
.. code-block:: bash
brew install gcc@8
Now, clone the repository:
.. code-block:: bash
git clone --recursive https://github.com/dmlc/xgboost
Create the ``build/`` directory and invoke CMake with option ``R_LIB=ON``. Make sure to add ``CC=gcc-8 CXX=g++-8`` so that Homebrew GCC is selected. After invoking CMake, you can install the R package by running ``make`` and ``make install``:
.. code-block:: bash
mkdir build
cd build
CC=gcc-8 CXX=g++-8 cmake .. -DR_LIB=ON
make -j4
make install
.. _r_gpu_support:
Installing R package with GPU support
@@ -429,23 +450,3 @@ Trouble Shooting
.. code-block:: bash
git clone https://github.com/dmlc/xgboost --recursive
Building the Documentation
==========================
XGBoost uses `Sphinx <https://www.sphinx-doc.org/en/stable/>`_ for documentation. To build it locally, you need an installed XGBoost with all its dependencies, along with:
* System dependencies
- git
- graphviz
* Python dependencies
- sphinx
- breathe
- guzzle_sphinx_theme
- recommonmark
- mock
Under ``xgboost/doc`` directory, run ``make <format>`` with ``<format>`` replaced by the format you want. For a list of supported formats, run ``make help`` under the same directory.
@@ -1,12 +0,0 @@
###############
XGBoost C++ API
###############
Starting from the 1.0 release, CMake will generate installation rules to export all C++ headers. But
the C++ interface is much closer to the internals of XGBoost than the other language bindings.
As a result it changes quite often and we don't maintain its stability. Along with the
plugin system (see ``plugin/example`` in XGBoost's source tree), users can utilize some
existing C++ headers to gain more access to the internals of XGBoost.
* `C++ interface documentation (latest master branch) <https://xgboost.readthedocs.io/en/latest/dev/files.html>`_
* `C++ interface documentation (last stable release) <https://xgboost.readthedocs.io/en/stable/dev/files.html>`_
@@ -1,12 +0,0 @@
#################
XGBoost C Package
#################
XGBoost implements a set of C APIs designed for various bindings; we maintain their
stability and the CMake/make build interface. See ``demo/c-api/README.md`` for an
overview and related examples. One can also generate Doxygen documentation by passing
``-DBUILD_C_DOC=ON`` as a parameter to CMake during the build, or simply look at the function
comments in ``include/xgboost/c_api.h``.
* `C API documentation (latest master branch) <https://xgboost.readthedocs.io/en/latest/dev/c__api_8h.html>`_
* `C API documentation (last stable release) <https://xgboost.readthedocs.io/en/stable/dev/c__api_8h.html>`_
@@ -58,7 +58,7 @@ for mod_name in MOCK_MODULES:
# General information about the project.
project = u'xgboost'
author = u'%s developers' % project
copyright = u'2019, %s' % author
copyright = u'2016, %s' % author
github_doc_root = 'https://github.com/dmlc/xgboost/tree/master/doc/'
os.environ['XGBOOST_BUILD_DOC'] = '1'
@@ -1,136 +0,0 @@
################
Coding Guideline
################
**Contents**
.. contents::
:backlinks: none
:local:
********************
C++ Coding Guideline
********************
- Follow `Google style for C++ <https://google.github.io/styleguide/cppguide.html>`_, with two exceptions:
* Each line of text may contain up to 100 characters.
* The use of C++ exceptions is allowed.
- Use C++11 features such as smart pointers, braced initializers, lambda functions, and ``std::thread``.
- Use Doxygen to document all the interface code.
- We have a series of automatic checks to ensure that all of our codebase complies with the Google style. Before submitting your pull request, you are encouraged to run the style checks on your machine. See :ref:`running_checks_locally`.
***********************
Python Coding Guideline
***********************
- Follow `PEP 8: Style Guide for Python Code <https://www.python.org/dev/peps/pep-0008/>`_. We use PyLint to automatically enforce PEP 8 style across our Python codebase. Before submitting your pull request, you are encouraged to run PyLint on your machine. See :ref:`running_checks_locally`.
- Docstrings should be in `NumPy docstring format <https://numpydoc.readthedocs.io/en/latest/format.html>`_.
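For example, here is a minimal sketch of a docstring in NumPy format (the function and its parameters are hypothetical, for illustration only):

.. code-block:: python

    def leaf_count(booster, tree_index=0):
        """Count the leaves of one tree in a trained booster.

        Parameters
        ----------
        booster : xgboost.Booster
            A trained XGBoost booster.
        tree_index : int, optional
            Index of the tree to inspect; defaults to the first tree.

        Returns
        -------
        int
            The number of leaf nodes in the selected tree.
        """
        return booster.get_dump()[tree_index].count('leaf=')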
.. _running_checks_locally:
******************
R Coding Guideline
******************
Code Style
==========
- We follow Google's C++ Style guide for C++ code.
- This is mainly to be consistent with the rest of the project.
- Another reason is we will be able to check style automatically with a linter.
- You can check the style of the code by typing the following command at root folder.
.. code-block:: bash
make rcpplint
- When needed, you can disable the linter warning on a certain line with ``// NOLINT(*)`` comments.
- We use `roxygen <https://cran.r-project.org/web/packages/roxygen2/vignettes/roxygen2.html>`_ for documenting the R package.
Rmarkdown Vignettes
===================
Rmarkdown vignettes are placed in `R-package/vignettes <https://github.com/dmlc/xgboost/tree/master/R-package/vignettes>`_.
These Rmarkdown files are not compiled. We host the compiled version on `doc/R-package <https://github.com/dmlc/xgboost/tree/master/doc/R-package>`_.
The following steps are followed to add a new Rmarkdown vignette:
- Add the original rmarkdown to ``R-package/vignettes``.
- Modify ``doc/R-package/Makefile`` to add the markdown files to be built.
- Clone the `dmlc/web-data <https://github.com/dmlc/web-data>`_ repo to folder ``doc``.
- Now type the following command on ``doc/R-package``:
.. code-block:: bash
make the-markdown-to-make.md
- This will generate the markdown, as well as the figures in ``doc/web-data/xgboost/knitr``.
- Modify the ``doc/R-package/index.md`` to point to the generated markdown.
- Add the generated figure to the ``dmlc/web-data`` repo.
- If you already cloned the repo to doc, this means ``git add``
- Create PR for both the markdown and ``dmlc/web-data``.
- You can also build the document locally by typing the following command at the ``doc`` directory:
.. code-block:: bash
make html
The reason we do this is to avoid an exploded repo size due to generated images.
R package versioning
====================
See :ref:`release`.
Registering native routines in R
================================
According to `R extension manual <https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Registering-native-routines>`_,
it is good practice to register native routines and to disable symbol search. When any changes or additions are made to the
C++ interface of the R package, please make corresponding changes in ``src/init.c`` as well.
*********************************
Running Formatting Checks Locally
*********************************
Once you submit a pull request to `dmlc/xgboost <https://github.com/dmlc/xgboost>`_, we perform
two automatic checks to enforce coding style conventions. To expedite the code review process, you are encouraged to run the checks locally on your machine prior to submitting your pull request.
Linter
======
We use `pylint <https://github.com/PyCQA/pylint>`_ and `cpplint <https://github.com/cpplint/cpplint>`_ to enforce style conventions and find potential errors. Linting is especially useful for Python, as we can catch many errors that would have otherwise occurred at run-time.
To run this check locally, run the following command from the top level source tree:
.. code-block:: bash
cd /path/to/xgboost/
make lint
This command requires the Python packages pylint and cpplint.
Clang-tidy
==========
`Clang-tidy <https://clang.llvm.org/extra/clang-tidy/>`_ is an advanced linter for C++ code, made by the LLVM team. We use it to conform our C++ codebase to modern C++ practices and conventions.
To run this check locally, run the following command from the top level source tree:
.. code-block:: bash
cd /path/to/xgboost/
python3 tests/ci_build/tidy.py
Also, the script accepts two optional integer arguments, namely ``--cpp`` and ``--cuda``. By default they are both set to 1, meaning that both C++ and CUDA code will be checked. If the CUDA toolkit is not installed on your machine, you'll encounter an error. To exclude CUDA source from linting, use:
.. code-block:: bash
cd /path/to/xgboost/
python3 tests/ci_build/tidy.py --cuda=0
Similarly, if you want to exclude C++ source from linting:
.. code-block:: bash
cd /path/to/xgboost/
python3 tests/ci_build/tidy.py --cpp=0
@@ -1,33 +0,0 @@
.. _community_guide:
XGBoost Community Guideline
===========================
XGBoost adopts the Apache-style model and is governed by merit. We believe that it is important to create an inclusive community where everyone can use, contribute to, and influence the direction of the project. See `CONTRIBUTORS.md <https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md>`_ for the current list of contributors.
General Development Process
---------------------------
Everyone in the community is welcome to send patches, documents, and propose new directions for the project. The key guideline here is to enable everyone in the community to get involved and participate in decisions and development. When major changes are proposed, an RFC should be sent to allow discussion by the community. We encourage public discussion in archivable channels, such as issues and the discussion forum, so that everyone in the community can participate and review the process later.
Code reviews are one of the key ways to ensure the quality of the code. High-quality code reviews prevent technical debt in the long term and are crucial to the success of the project. A pull request needs to be reviewed before it gets merged. A committer who has expertise in the corresponding area will moderate the pull request and merge the code when it is ready. The corresponding committer may request multiple reviewers who are familiar with the area of the code. We encourage contributors to request code reviews themselves and help review each other's code -- remember everyone is volunteering their time to the community; a high-quality code review itself costs as much as the actual code contribution, and you can get your code reviewed quickly if you do others the same favor.
The community should strive to reach a consensus on technical decisions through discussion. We expect committers and PMCs to moderate technical discussions in a diplomatic way, and provide suggestions with clear technical reasoning when necessary.
Committers
----------
Committers are individuals who are granted write access to the project. A committer is usually responsible for a certain area or several areas of the code where they oversee the code review process. The area of contribution can take all forms, including code contributions and code reviews, documents, education, and outreach. Committers are essential for a high-quality and healthy project. The community actively looks for new committers from contributors. Here is a list of useful traits that help the community to recognize potential committers:
- Sustained contribution to the project, demonstrated by discussion over RFCs, code reviews and proposals of new features, and other development activities. Being familiar with, and being able to take ownership of, one or several areas of the project.
- Quality of contributions: High-quality, readable code contributions indicated by pull requests that can be merged without a substantial code review. History of creating clean, maintainable code and including good test cases. Informative code reviews to help other contributors that adhere to a good standard.
- Community involvement: active participation in the discussion forum, promoting the project via tutorials, talks and outreach. We encourage committers to collaborate broadly, e.g. do code reviews and discuss designs with community members that they do not interact with physically.
The Project Management Committee (PMC) consists of a group of active committers that moderate the discussion, manage the project release, and propose new committer/PMC members. Potential candidates are usually proposed via an internal discussion among PMCs, followed by a consensus approval, i.e. at least 3 +1 votes, and no vetoes. Any veto must be accompanied by reasoning. PMCs should serve the community by upholding the community practices and guidelines to make XGBoost a better community for everyone. PMCs should strive to only nominate new candidates outside of their own organization.
Reviewers
---------
Reviewers are individuals who have actively contributed to the project and are willing to participate in the code review of new contributions. We identify reviewers from active contributors. The committers should explicitly solicit reviews from reviewers. High-quality code reviews prevent technical debt in the long term and are crucial to the success of the project. A pull request to the project has to be reviewed by at least one reviewer in order to be merged.
@@ -1,30 +0,0 @@
##########################
Documentation and Examples
##########################
**Contents**
.. contents::
:backlinks: none
:local:
*********
Documents
*********
* Documentation is built using `Sphinx <http://www.sphinx-doc.org/en/master/>`_.
* Each document is written in `reStructuredText <http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_.
* You can build document locally to see the effect, by running
.. code-block:: bash
make html
inside the ``doc/`` directory.
********
Examples
********
* Use cases and examples will be in `demo <https://github.com/dmlc/xgboost/tree/master/demo>`_.
* We are super excited to hear about your story. If you have blog posts,
tutorials, or code solutions using XGBoost, please tell us and we will add
a link in the example pages.
@@ -1,76 +0,0 @@
###################
Git Workflow Howtos
###################
**Contents**
.. contents::
:backlinks: none
:local:
***********************************
How to resolve conflict with master
***********************************
- First, rebase to the most recent master
.. code-block:: bash
# The first two steps can be skipped after you do it once.
git remote add upstream https://github.com/dmlc/xgboost
git fetch upstream
git rebase upstream/master
- Git may show some conflicts it cannot merge, say ``conflicted.py``.
- Manually modify the file to resolve the conflict.
- After you resolved the conflict, mark it as resolved by
.. code-block:: bash
git add conflicted.py
- Then you can continue rebase by
.. code-block:: bash
git rebase --continue
- Finally push to your fork, you may need to force push here.
.. code-block:: bash
git push --force
****************************************
How to combine multiple commits into one
****************************************
Sometimes we want to combine multiple commits, especially when later commits are only fixes to previous ones,
to create a PR with a set of meaningful commits. You can do it with the following steps.
- Before doing so, configure the default editor of git if you haven't done so before.
.. code-block:: bash
git config core.editor the-editor-you-like
- Assume we want to merge the last 3 commits; type the following commands
.. code-block:: bash
git rebase -i HEAD~3
- It will pop up a text editor. Set the first commit as ``pick``, and change later ones to ``squash``.
- After you save the file, another text editor will pop up asking you to modify the combined commit message.
- Push the changes to your fork, you need to force push.
.. code-block:: bash
git push --force
*************************************
What is the consequence of force push
*************************************
The previous two tips require a force push because we altered the path of the commits.
It is fine to force push to your own fork, as long as the commits changed are only yours.
@@ -1,28 +0,0 @@
#####################
Contribute to XGBoost
#####################
XGBoost has been developed by community members. Everyone is welcome to contribute. We value all forms of contributions, including, but not limited to:
* Code reviews for pull requests
* Documentation and usage examples
* Community participation in forums and issues
* Code readability and developer guide
- We welcome contributions that add code comments to improve readability.
- We also welcome contributions to docs to explain the design choices of the XGBoost internals.
* Test cases to make the codebase more robust.
* Tutorials, blog posts, talks that promote the project.
Here are guidelines for contributing to various aspects of the XGBoost project:
.. toctree::
:maxdepth: 2
Community Guideline <community>
coding_guide
unit_tests
Docs and Examples <docs>
git_guide
release
@@ -1,13 +0,0 @@
.. _release:
XGBoost Release Policy
=======================
Versioning Policy
---------------------------
Starting from XGBoost 1.0.0, each XGBoost release will be versioned as [MAJOR].[FEATURE].[MAINTENANCE]
* MAJOR: We guarantee API compatibility across releases with the same major version number. We expect to have a 1+ year development period for a new MAJOR release version.
* FEATURE: We ship new features, improvements and bug fixes through feature releases. The cycle length of a feature release is decided by the size of the feature roadmap. The roadmap is decided right after the previous release.
* MAINTENANCE: A maintenance version only contains bug fixes. This type of release only occurs when we find significant correctness and/or performance bugs that are a barrier for users to upgrade to a new version of XGBoost smoothly.
@@ -1,179 +0,0 @@
########################
Adding and running tests
########################
A high-quality suite of tests is crucial in ensuring correctness and robustness of the codebase. Here, we provide instructions on how to run unit tests, and also how to add a new one.
**Contents**
.. contents::
:backlinks: none
:local:
**********************
Adding a new unit test
**********************
Python package: pytest
======================
Add your test under the directory `tests/python/ <https://github.com/dmlc/xgboost/tree/master/tests/python>`_ or `tests/python-gpu/ <https://github.com/dmlc/xgboost/tree/master/tests/python-gpu>`_ (if you are testing GPU code). Refer to `the PyTest tutorial <https://docs.pytest.org/en/latest/getting-started.html>`_ to learn how to write tests for Python code.
You may try running your test by following instructions in :ref:`this section <running_pytest>`.
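For instance, a minimal new test could look like the following sketch (the file name ``test_basic_training.py``, the synthetic data and the assertion are hypothetical, for illustration only):

.. code-block:: python

    import numpy as np
    import xgboost as xgb

    def test_training_reduces_error():
        # Small synthetic binary classification problem
        X = np.random.rand(100, 4)
        y = (X[:, 0] > 0.5).astype(int)
        dtrain = xgb.DMatrix(X, label=y)
        params = {'max_depth': 2, 'eta': 1,
                  'objective': 'binary:logistic', 'eval_metric': 'error'}
        evals_result = {}
        xgb.train(params, dtrain, num_boost_round=5,
                  evals=[(dtrain, 'train')], evals_result=evals_result)
        errors = evals_result['train']['error']
        # Training error should not increase over boosting rounds
        assert errors[-1] <= errors[0]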
C++: Google Test
================
Add your test under the directory `tests/cpp/ <https://github.com/dmlc/xgboost/tree/master/tests/cpp>`_. Refer to `this excellent tutorial on using Google Test <https://developer.ibm.com/articles/au-googletestingframework/>`_.
You may try running your test by following instructions in :ref:`this section <running_gtest>`. Note: Google Test version 1.8.1 or later is required.
JVM packages: JUnit / scalatest
===============================
The JVM packages for XGBoost (XGBoost4J / XGBoost4J-Spark) use `the Maven Standard Directory Layout <https://maven.apache.org/guides/introduction/introduction-to-the-standard-directory-layout.html>`_. Specifically, the tests for the JVM packages are located in the following locations:
* `jvm-packages/xgboost4j/src/test/ <https://github.com/dmlc/xgboost/tree/master/jvm-packages/xgboost4j/src/test>`_
* `jvm-packages/xgboost4j-spark/src/test/ <https://github.com/dmlc/xgboost/tree/master/jvm-packages/xgboost4j-spark/src/test>`_
To write a test for Java code, see `JUnit 5 tutorial <https://junit.org/junit5/docs/current/user-guide/>`_.
To write a test for Scala, see `Scalatest tutorial <http://www.scalatest.org/user_guide/writing_your_first_test>`_.
You may try running your test by following instructions in :ref:`this section <running_jvm_tests>`.
R package: testthat
===================
Add your test under the directory `R-package/tests/testthat <https://github.com/dmlc/xgboost/tree/master/R-package/tests/testthat>`_. Refer to `this excellent tutorial on testthat <https://kbroman.org/pkg_primer/pages/tests.html>`_.
You may try running your test by following instructions in :ref:`this section <running_r_tests>`.
**************************
Running Unit Tests Locally
**************************
.. _running_r_tests:
R package
=========
Run
.. code-block:: bash
make Rcheck
at the root of the project directory.
.. _running_jvm_tests:
JVM packages
============
As part of the building process, tests are run:
.. code-block:: bash
mvn package
.. _running_pytest:
Python package: pytest
======================
To run Python unit tests, first install `pytest <https://docs.pytest.org/en/latest/contents.html>`_ package:
.. code:: bash
pip3 install pytest
Then compile XGBoost according to instructions in :ref:`build_shared_lib`. Finally, invoke pytest at the project root directory:
.. code:: bash
# Tell Python where to find XGBoost module
export PYTHONPATH=./python-package
pytest -v -s --fulltrace tests/python
In addition, to test CUDA code, run:
.. code:: bash
# Tell Python where to find XGBoost module
export PYTHONPATH=./python-package
pytest -v -s --fulltrace tests/python-gpu
(For this step, you should have compiled XGBoost with CUDA enabled.)
.. _running_gtest:
C++: Google Test
================
To build and run C++ unit tests, enable tests while running CMake:
.. code-block:: bash
mkdir build
cd build
cmake -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON ..
make
make test
To enable tests for CUDA code, add ``-DUSE_CUDA=ON`` and ``-DUSE_NCCL=ON`` (CUDA toolkit required):
.. code-block:: bash
mkdir build
cd build
cmake -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON -DUSE_CUDA=ON -DUSE_NCCL=ON ..
make
make test
One can also run all unit tests using the ctest tool, which provides higher flexibility. For example:
.. code-block:: bash
ctest --verbose
***********************************************
Sanitizers: Detect memory errors and data races
***********************************************
By default, sanitizers are bundled in GCC and Clang/LLVM. One can enable
sanitizers with GCC >= 4.8 or LLVM >= 3.1, but some distributions might package
sanitizers separately. Here is a list of supported sanitizers with
corresponding library names:
- Address sanitizer: libasan
- Leak sanitizer: liblsan
- Thread sanitizer: libtsan
Memory sanitizer is exclusive to LLVM, hence not supported in XGBoost.
How to build XGBoost with sanitizers
====================================
One can build XGBoost with sanitizer support by specifying -DUSE_SANITIZER=ON.
By default, the address sanitizer and leak sanitizer are used when you turn the
USE_SANITIZER flag on. You can always change the default by providing a
semicolon-separated list of sanitizers to ENABLED_SANITIZERS. Note that the thread
sanitizer is not compatible with the other two sanitizers.
.. code-block:: bash
cmake -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak" /path/to/xgboost
By default, CMake will search regular system paths for sanitizers; you can also
supply a custom path via SANITIZER_PATH.
.. code-block:: bash
cmake -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak" \
-DSANITIZER_PATH=/path/to/sanitizers /path/to/xgboost
How to use sanitizers with CUDA support
=======================================
Running XGBoost on CUDA with the address sanitizer (asan) will raise memory errors.
To use asan with CUDA correctly, you need to configure asan via the ASAN_OPTIONS
environment variable:
.. code-block:: bash
ASAN_OPTIONS=protect_shadow_gap=0 ${BUILD_DIR}/testxgboost
For details, please consult the `official documentation <https://github.com/google/sanitizers/wiki>`_ for sanitizers.
doc/contribute.rst Normal file
@@ -0,0 +1,290 @@
#####################
Contribute to XGBoost
#####################
XGBoost has been developed and used by a group of active community members.
Everyone is more than welcome to contribute. It is a way to make the project better and more accessible to more users.
- Please add your name to `CONTRIBUTORS.md <https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md>`_ after your patch has been merged.
- Please also update `NEWS.md <https://github.com/dmlc/xgboost/blob/master/NEWS.md>`_ to add a note on your changes to the API or XGBoost documentation.
**Guidelines**
* `Submit Pull Request`_
* `Git Workflow Howtos`_
- `How to resolve conflict with master`_
- `How to combine multiple commits into one`_
- `What is the consequence of force push`_
* `Documents`_
* `Testcases`_
* `Sanitizers`_
* `clang-tidy`_
* `Examples`_
* `Core Library`_
* `Python Package`_
* `R Package`_
*******************
Submit Pull Request
*******************
* Before submitting, please rebase your code on the most recent version of master. You can do it by
.. code-block:: bash
git remote add upstream https://github.com/dmlc/xgboost
git fetch upstream
git rebase upstream/master
* If you have multiple small commits, it might be good to merge them together (use git rebase then squash) into more meaningful groups.
* Send the pull request!
- Fix the problems reported by automatic checks
- If you are contributing a new module, consider adding a test case in `tests <https://github.com/dmlc/xgboost/tree/master/tests>`_.
*******************
Git Workflow Howtos
*******************
How to resolve conflict with master
===================================
- First, rebase to the most recent master
.. code-block:: bash
# The first two steps can be skipped after you do it once.
git remote add upstream https://github.com/dmlc/xgboost
git fetch upstream
git rebase upstream/master
- Git may show some conflicts it cannot merge, say ``conflicted.py``.
- Manually modify the file to resolve the conflict.
- After you resolved the conflict, mark it as resolved by
.. code-block:: bash
git add conflicted.py
- Then you can continue rebase by
.. code-block:: bash
git rebase --continue
- Finally push to your fork, you may need to force push here.
.. code-block:: bash
git push --force
How to combine multiple commits into one
========================================
Sometimes we want to combine multiple commits, especially when later commits are only fixes to previous ones,
to create a PR with a set of meaningful commits. You can do it with the following steps.
- Before doing so, configure the default editor of git if you haven't done so before.
.. code-block:: bash
git config core.editor the-editor-you-like
- Assume we want to merge the last 3 commits; type the following commands
.. code-block:: bash
git rebase -i HEAD~3
- It will pop up a text editor. Set the first commit as ``pick``, and change later ones to ``squash``.
- After you save the file, another text editor will pop up asking you to modify the combined commit message.
- Push the changes to your fork, you need to force push.
.. code-block:: bash
git push --force
What is the consequence of force push
=====================================
The previous two tips require a force push because we altered the path of the commits.
It is fine to force push to your own fork, as long as the commits changed are only yours.
*********
Documents
*********
* Documentation is built using Sphinx.
* Each document is written in `reStructuredText <http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_.
* You can build the documents locally to see the effect.
*********
Testcases
*********
* All the test cases are in `tests <https://github.com/dmlc/xgboost/tree/master/tests>`_.
* We use Python nose for Python test cases.
**********
Sanitizers
**********
By default, sanitizers are bundled in GCC and Clang/LLVM. One can enable
sanitizers with GCC >= 4.8 or LLVM >= 3.1, but some distributions might package
sanitizers separately. Here is a list of supported sanitizers with
corresponding library names:
- Address sanitizer: libasan
- Leak sanitizer: liblsan
- Thread sanitizer: libtsan
Memory sanitizer is exclusive to LLVM, hence not supported in XGBoost.
How to build XGBoost with sanitizers
====================================
One can build XGBoost with sanitizer support by specifying -DUSE_SANITIZER=ON.
By default, the address sanitizer and leak sanitizer are used when you turn the
USE_SANITIZER flag on. You can always change the default by providing a
semicolon-separated list of sanitizers to ENABLED_SANITIZERS. Note that the thread
sanitizer is not compatible with the other two sanitizers.
.. code-block:: bash
cmake -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak" /path/to/xgboost
By default, CMake will search regular system paths for sanitizers; you can also
supply a custom path via SANITIZER_PATH.
.. code-block:: bash
cmake -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak" \
-DSANITIZER_PATH=/path/to/sanitizers /path/to/xgboost
How to use sanitizers with CUDA support
=======================================
Running XGBoost on CUDA with the address sanitizer (asan) will raise memory errors.
To use asan with CUDA correctly, you need to configure asan via the ASAN_OPTIONS
environment variable:
.. code-block:: bash
ASAN_OPTIONS=protect_shadow_gap=0 ${BUILD_DIR}/testxgboost
For details, please consult the `official documentation <https://github.com/google/sanitizers/wiki>`_ for sanitizers.
**********
clang-tidy
**********
To run clang-tidy on both C++ and CUDA source code, run the following command
from the top level source tree:
.. code-block:: bash
cd /path/to/xgboost/
python3 tests/ci_build/tidy.py --gtest-path=/path/to/google-test
The script requires the full path of Google Test library via the ``--gtest-path`` argument.
Also, the script accepts two optional integer arguments, namely ``--cpp`` and ``--cuda``.
By default they are both set to 1. If you want to exclude CUDA source from
linting, use:
.. code-block:: bash
cd /path/to/xgboost/
python3 tests/ci_build/tidy.py --cuda=0
Similarly, if you want to exclude C++ source from linting:
.. code-block:: bash
cd /path/to/xgboost/
python3 tests/ci_build/tidy.py --cpp=0
********
Examples
********
* Use cases and examples will be in `demo <https://github.com/dmlc/xgboost/tree/master/demo>`_.
* We are super excited to hear about your story. If you have blog posts,
tutorials, or code solutions using XGBoost, please tell us and we will add
a link in the example pages.
************
Core Library
************
- Follow `Google style for C++ <https://google.github.io/styleguide/cppguide.html>`_.
- Use C++11 features such as smart pointers, braced initializers, lambda functions, and ``std::thread``.
- We use Doxygen to document all the interface code.
- You can reproduce the linter checks by running ``make lint``
**************
Python Package
**************
- Always add docstrings to new functions in numpydoc format.
- You can reproduce the linter checks by typing ``make lint``
*********
R Package
*********
Code Style
==========
- We follow Google's C++ Style guide for C++ code.
- This is mainly to be consistent with the rest of the project.
- Another reason is we will be able to check style automatically with a linter.
- You can check the style of the code by typing the following command at root folder.
.. code-block:: bash
make rcpplint
- When needed, you can disable the linter warning on a certain line with ``// NOLINT(*)`` comments.
- We use `roxygen <https://cran.r-project.org/web/packages/roxygen2/vignettes/roxygen2.html>`_ for documenting the R package.
Rmarkdown Vignettes
===================
Rmarkdown vignettes are placed in `R-package/vignettes <https://github.com/dmlc/xgboost/tree/master/R-package/vignettes>`_.
These Rmarkdown files are not compiled. We host the compiled version on `doc/R-package <https://github.com/dmlc/xgboost/tree/master/doc/R-package>`_.
The following steps are followed to add a new Rmarkdown vignette:
- Add the original rmarkdown to ``R-package/vignettes``.
- Modify ``doc/R-package/Makefile`` to add the markdown files to be built.
- Clone the `dmlc/web-data <https://github.com/dmlc/web-data>`_ repo to folder ``doc``.
- Now type the following command on ``doc/R-package``:
.. code-block:: bash
make the-markdown-to-make.md
- This will generate the markdown, as well as the figures in ``doc/web-data/xgboost/knitr``.
- Modify the ``doc/R-package/index.md`` to point to the generated markdown.
- Add the generated figure to the ``dmlc/web-data`` repo.
- If you already cloned the repo to doc, this means ``git add``
- Create PR for both the markdown and ``dmlc/web-data``.
- You can also build the document locally by typing the following command at the ``doc`` directory:
.. code-block:: bash
make html
The reason we do this is to avoid an exploded repo size due to generated images.
R package versioning
====================
Since version 0.6.4.3, we have adopted a versioning system that uses x.y.z (or ``core_major.core_minor.cran_release``)
format for CRAN releases and an x.y.z.p (or ``core_major.core_minor.cran_release.patch``) format for development patch versions.
This approach is similar to the one described in Yihui Xie's
`blog post on R Package Versioning <https://yihui.name/en/2013/06/r-package-versioning/>`_,
except we need an additional field to accommodate the x.y core library version.
Each new CRAN release bumps up the 3rd field, while developments in-between CRAN releases
would be marked by an additional 4th field on the top of an existing CRAN release version.
Some additional consideration is needed when the core library version changes.
E.g., after the core changes from 0.6 to 0.7, the R package development version would become 0.7.0.1, working towards
a 0.7.1 CRAN release. The 0.7.0 would not be released to CRAN, unless it would require almost no additional development.
Registering native routines in R
================================
According to `R extension manual <https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Registering-native-routines>`_,
it is good practice to register native routines and to disable symbol search. When any changes or additions are made to the
C++ interface of the R package, please make corresponding changes in ``src/init.c`` as well.
@@ -24,7 +24,7 @@ Python
dtrain = xgb.DMatrix('demo/data/agaricus.txt.train')
dtest = xgb.DMatrix('demo/data/agaricus.txt.test')
# specify parameters via map
param = {'max_depth':2, 'eta':1, 'objective':'binary:logistic' }
param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic' }
num_round = 2
bst = xgb.train(param, dtrain, num_round)
# make prediction
@@ -5,10 +5,10 @@ XGBoost GPU Support
This page contains information about GPU algorithms supported in XGBoost.
To install GPU support, checkout the :doc:`/build`.
.. note:: CUDA 9.0, Compute Capability 3.5 required
.. note:: CUDA 8.0, Compute Capability 3.5 required
The GPU algorithms in XGBoost require a graphics card with compute capability 3.5 or higher, with
CUDA toolkits 9.0 or later.
CUDA toolkits 8.0 or later.
(See `this list <https://en.wikipedia.org/wiki/CUDA#GPUs_supported>`_ to look up compute capability of your GPU card.)
*********************************************
@@ -23,11 +23,13 @@ Specify the ``tree_method`` parameter as one of the following algorithms.
Algorithms
----------
+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| tree_method | Description |
+=======================+=======================================================================================================================================================================+
| gpu_hist | Equivalent to the XGBoost fast histogram algorithm. Much faster and uses considerably less memory. NOTE: Will run very slowly on GPUs older than Pascal architecture. |
+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| tree_method | Description |
+==============+=======================================================================================================================================================================+
| gpu_exact | The standard XGBoost tree construction algorithm. Performs exact search for splits. Slower and uses considerably more memory than ``gpu_hist``. |
+--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| gpu_hist | Equivalent to the XGBoost fast histogram algorithm. Much faster and uses considerably less memory. NOTE: Will run very slowly on GPUs older than Pascal architecture. |
+--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
Supported parameters
--------------------
@@ -35,41 +37,41 @@ Supported parameters
.. |tick| unicode:: U+2714
.. |cross| unicode:: U+2718
+--------------------------------+--------------+
| parameter | ``gpu_hist`` |
+================================+==============+
| ``subsample`` | |tick| |
+--------------------------------+--------------+
| ``colsample_bytree`` | |tick| |
+--------------------------------+--------------+
| ``colsample_bylevel`` | |tick| |
+--------------------------------+--------------+
| ``max_bin`` | |tick| |
+--------------------------------+--------------+
| ``gamma`` | |tick| |
+--------------------------------+--------------+
| ``gpu_id`` | |tick| |
+--------------------------------+--------------+
| ``n_gpus`` (deprecated) | |tick| |
+--------------------------------+--------------+
| ``predictor`` | |tick| |
+--------------------------------+--------------+
| ``grow_policy`` | |tick| |
+--------------------------------+--------------+
| ``monotone_constraints`` | |tick| |
+--------------------------------+--------------+
| ``interaction_constraints`` | |tick| |
+--------------------------------+--------------+
| ``single_precision_histogram`` | |tick| |
+--------------------------------+--------------+
+--------------------------------+---------------+--------------+
| parameter | ``gpu_exact`` | ``gpu_hist`` |
+================================+===============+==============+
| ``subsample`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``colsample_bytree`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``colsample_bylevel`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``max_bin`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``gpu_id`` | |tick| | |tick| |
+--------------------------------+---------------+--------------+
| ``n_gpus`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``predictor`` | |tick| | |tick| |
+--------------------------------+---------------+--------------+
| ``grow_policy`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``monotone_constraints`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
| ``single_precision_histogram`` | |cross| | |tick| |
+--------------------------------+---------------+--------------+
GPU accelerated prediction is enabled by default for the above mentioned ``tree_method`` parameters but can be switched to CPU prediction by setting ``predictor`` to ``cpu_predictor``. This could be useful if you want to conserve GPU memory. Likewise when using CPU algorithms, GPU accelerated prediction can be enabled by setting ``predictor`` to ``gpu_predictor``.
The experimental parameter ``single_precision_histogram`` can be set to True to enable building histograms using single precision. This may improve speed, in particular on older architectures.
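A minimal sketch of these settings in the Python interface (parameter names as documented above):

.. code-block:: python

    param = {
        'tree_method': 'gpu_hist',
        'predictor': 'cpu_predictor',          # run prediction on the CPU to conserve GPU memory
        'single_precision_histogram': True,    # experimental: single-precision histograms
    }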
The device ordinal (which GPU to use if you have many of them) can be selected using the
``gpu_id`` parameter, which defaults to 0 (the first device reported by CUDA runtime).
The device ordinal can be selected using the ``gpu_id`` parameter, which defaults to 0.
Multiple GPUs can be used with the ``gpu_hist`` tree method using the ``n_gpus`` parameter, which defaults to 1. If this is set to -1, all available GPUs will be used. If ``gpu_id`` is specified as non-zero, the selected GPU devices will be from ``gpu_id`` to ``gpu_id+n_gpus``; please note that ``gpu_id+n_gpus`` must be less than or equal to the number of available GPUs on your system. As with GPU vs. CPU, multi-GPU will not always be faster than a single GPU due to PCI bus bandwidth that can limit performance.
.. note:: Enabling multi-GPU training
Default installation may not enable multi-GPU training. To use multiple GPUs, make sure to read :ref:`build_gpu_support`.
The GPU algorithms currently work with CLI, Python and R packages. See :doc:`/build` for details.
@@ -77,88 +79,73 @@ The GPU algorithms currently work with CLI, Python and R packages. See :doc:`/bu
:caption: Python example
param['gpu_id'] = 0
param['max_bin'] = 16
param['tree_method'] = 'gpu_hist'
.. code-block:: python
:caption: With Scikit-Learn interface
XGBRegressor(tree_method='gpu_hist', gpu_id=0)
Single Node Multi-GPU
=====================
.. note:: Single node multi-GPU training with the ``n_gpus`` parameter is deprecated after 0.90. Please use distributed GPU training with one process per GPU.
Multi-node Multi-GPU Training
=============================
XGBoost supports fully distributed GPU training using `Dask <https://dask.org/>`_. For
getting started see our tutorial :doc:`/tutorials/dask` and worked examples `here
<https://github.com/dmlc/xgboost/tree/master/demo/dask>`_, also Python documentation
:ref:`dask_api` for complete reference.
Objective functions
===================
Most of the objective functions implemented in XGBoost can be run on the GPU. The following table shows the current support status.
+--------------------+-------------+
| Objectives | GPU support |
+--------------------+-------------+
| reg:squarederror | |tick| |
+--------------------+-------------+
| reg:squaredlogerror| |tick| |
+--------------------+-------------+
| reg:logistic | |tick| |
+--------------------+-------------+
| binary:logistic | |tick| |
+--------------------+-------------+
| binary:logitraw | |tick| |
+--------------------+-------------+
| binary:hinge | |tick| |
+--------------------+-------------+
| count:poisson | |tick| |
+--------------------+-------------+
| reg:gamma | |tick| |
+--------------------+-------------+
| reg:tweedie | |tick| |
+--------------------+-------------+
| multi:softmax | |tick| |
+--------------------+-------------+
| multi:softprob | |tick| |
+--------------------+-------------+
| survival:cox | |cross| |
+--------------------+-------------+
| rank:pairwise | |cross| |
+--------------------+-------------+
| rank:ndcg | |cross| |
+--------------------+-------------+
| rank:map | |cross| |
+--------------------+-------------+
.. |tick| unicode:: U+2714
.. |cross| unicode:: U+2718
Objectives will run on the GPU if a GPU updater (``gpu_hist``) is used; otherwise they will run on the CPU by
default. For unsupported objectives, XGBoost will fall back to the CPU implementation by
default.
+-----------------+-------------+
| Objectives | GPU support |
+-----------------+-------------+
| reg:squarederror| |tick| |
+-----------------+-------------+
| reg:logistic | |tick| |
+-----------------+-------------+
| binary:logistic | |tick| |
+-----------------+-------------+
| binary:logitraw | |tick| |
+-----------------+-------------+
| binary:hinge | |tick| |
+-----------------+-------------+
| count:poisson | |tick| |
+-----------------+-------------+
| reg:gamma | |tick| |
+-----------------+-------------+
| reg:tweedie | |tick| |
+-----------------+-------------+
| multi:softmax | |tick| |
+-----------------+-------------+
| multi:softprob | |tick| |
+-----------------+-------------+
| survival:cox | |cross| |
+-----------------+-------------+
| rank:pairwise | |cross| |
+-----------------+-------------+
| rank:ndcg | |cross| |
+-----------------+-------------+
| rank:map | |cross| |
+-----------------+-------------+
For multi-gpu support, objective functions also honor the ``n_gpus`` parameter,
which, by default is set to 1. To disable running objectives on GPU, just set
``n_gpus`` to 0.
Metric functions
===================
The following table shows the current support status for evaluation metrics on the GPU.
.. |tick| unicode:: U+2714
.. |cross| unicode:: U+2718
+-----------------+-------------+
| Metric | GPU Support |
+=================+=============+
| rmse | |tick| |
+-----------------+-------------+
| rmsle | |tick| |
+-----------------+-------------+
| mae | |tick| |
+-----------------+-------------+
| logloss | |tick| |
+-----------------+-------------+
| error | |tick| |
+-----------------+-------------+
| merror | |tick| |
| merror | |cross| |
+-----------------+-------------+
| mlogloss | |tick| |
| mlogloss | |cross| |
+-----------------+-------------+
| auc | |cross| |
+-----------------+-------------+
@@ -179,8 +166,10 @@ Following table shows current support status for evaluation metrics on the GPU.
| tweedie-nloglik | |tick| |
+-----------------+-------------+
Similar to objective functions, the default device for metrics is selected based on the tree
updater and predictor (which is itself selected based on the tree updater).
As for objective functions, metrics honor the ``n_gpus`` parameter,
which, by default is set to 1. To disable running metrics on GPU, just set
``n_gpus`` to 0.
Benchmarks
==========
@@ -199,30 +188,15 @@ Training time on 1,000,000 rows x 50 columns with 500 boosting iterations a
+--------------+----------+
| hist | 63.55 |
+--------------+----------+
| gpu_exact | 161.08 |
+--------------+----------+
| exact | 1082.20 |
+--------------+----------+
See `GPU Accelerated XGBoost <https://xgboost.ai/2016/12/14/GPU-accelerated-xgboost.html>`_ and `Updates to the XGBoost GPU algorithms <https://xgboost.ai/2018/07/04/gpu-xgboost-update.html>`_ for additional performance benchmarks of the ``gpu_hist`` tree method.
Memory usage
============
The following are some guidelines on the device memory usage of the ``gpu_hist`` updater.
If you train XGBoost in a loop, you may notice that XGBoost does not free device memory after each training iteration. This is because memory is allocated over the lifetime of the booster object and does not get freed until the booster is freed. A workaround is to serialise the booster object after training. See ``demo/gpu_acceleration/memory.py`` for a simple example.
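A minimal sketch of this workaround (assuming a CUDA-enabled build; the synthetic data and parameters here are for illustration only):

.. code-block:: python

    import pickle
    import numpy as np
    import xgboost as xgb

    X, y = np.random.rand(1000, 10), np.random.randint(2, size=1000)
    dtrain = xgb.DMatrix(X, label=y)

    bst = xgb.train({'tree_method': 'gpu_hist'}, dtrain, num_boost_round=100)
    buf = pickle.dumps(bst)   # serialise the trained booster to host memory
    del bst                   # drop the booster so its device memory can be freed
    bst = pickle.loads(buf)   # restore the booster for later use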
Memory inside XGBoost training is generally allocated for two reasons: storing the dataset and working memory.
The dataset itself is stored on device in a compressed ELLPACK format. The ELLPACK format is a type of sparse matrix that stores elements with a constant row stride. This format is convenient for parallel computation when compared to CSR because the row index of each element is known directly from its address in memory. The disadvantage of the ELLPACK format is that it becomes less memory efficient if the maximum row length is significantly more than the average row length. Elements are quantised and stored as integers. These integers are compressed to a minimum bit length. Depending on the number of features, we usually don't need the full range of a 32 bit integer to store elements and so compress this down. The compressed, quantised ELLPACK format will commonly use 1/4 the space of a CSR matrix stored in floating point.
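To illustrate the layout (this is not XGBoost's internal code, just a sketch of an ELLPACK-style matrix built with NumPy):

.. code-block:: python

    import numpy as np

    # (column, value) pairs for each row of a small sparse matrix
    rows = [[(0, 1.0), (2, 2.0)],
            [(0, 4.0), (3, 3.0)],
            [(0, 3.0), (1, 1.0), (2, 2.0)]]

    stride = max(len(r) for r in rows)                  # constant row stride
    col = np.full((len(rows), stride), -1, np.int32)    # -1 marks padding
    val = np.zeros((len(rows), stride), np.float32)
    for i, r in enumerate(rows):
        for j, (c, v) in enumerate(r):
            col[i, j], val[i, j] = c, v

    # The row of element k in the flattened storage is simply k // stride,
    # which is what makes this layout convenient for parallel processing.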
In some cases the full CSR matrix stored in floating point needs to be allocated on the device. This currently occurs for prediction in multiclass classification. If this is a problem, consider setting ``'predictor'='cpu_predictor'``. This also occurs when the external data itself comes from a source on the device, e.g. a cuDF DataFrame. These are known issues we hope to resolve.
Working memory is allocated inside the algorithm proportional to the number of rows, to keep track of gradients, tree positions and other per-row statistics. Memory is allocated for histogram bins proportional to the number of bins, the number of features and the nodes in the tree. For performance reasons we keep histograms in memory from previous nodes in the tree; when a certain threshold of memory usage is passed, we stop doing this to conserve memory, at some performance loss.
The quantile finding algorithm also uses some amount of working device memory. It is able to operate in batches, but is not currently well optimised for sparse data.
See `GPU Accelerated XGBoost <https://xgboost.ai/2016/12/14/GPU-accelerated-xgboost.html>`_ and `Updates to the XGBoost GPU algorithms <https://xgboost.ai/2018/07/04/gpu-xgboost-update.html>`_ for additional performance benchmarks of the ``gpu_exact`` and ``gpu_hist`` tree methods.
Developer notes
===============
==========
The application may be profiled with annotations by specifying USE_NVTX to CMake and providing the path to the stand-alone NVTX header via NVTX_HEADER_DIR. Regions covered by the 'Monitor' class in CUDA code will automatically appear in the Nsight profiler.
**********
@@ -233,9 +207,8 @@ References
`Nvidia Parallel Forall: Gradient Boosting, Decision Trees and XGBoost with CUDA <https://devblogs.nvidia.com/parallelforall/gradient-boosting-decision-trees-xgboost-cuda/>`_
Contributors
============
=======
Many thanks to the following contributors (alphabetical order):
* Andrey Adinets
* Jiaming Yuan
* Jonathan C. McKinney
@@ -245,4 +218,4 @@ Many thanks to the following contributors (alphabetical order):
* Shankara Rao Thejaswi Nanditale
* Vinay Deshpande
Please report bugs to the XGBoost issues list: https://github.com/dmlc/xgboost/issues. For general questions please visit our user form: https://discuss.xgboost.ai/.
Please report bugs to the user forum https://discuss.xgboost.ai/.
@@ -25,9 +25,6 @@ Contents
Python package <python/index>
R package <R-package/index>
JVM package <jvm/index>
Ruby package <https://github.com/ankane/xgb>
Julia package <julia>
C Package <c>
C++ Interface <c++>
CLI interface <cli>
contrib/index
contribute
@@ -20,7 +20,7 @@ Installation
Installation from source
========================
Building XGBoost4J using Maven requires Maven 3 or newer, Java 7+ and CMake 3.3+ for compiling the JNI bindings.
Building XGBoost4J using Maven requires Maven 3 or newer, Java 7+ and CMake 3.2+ for compiling the JNI bindings.
Before you install XGBoost4J, you need to define environment variable ``JAVA_HOME`` as your JDK directory to ensure that your compiler can find ``jni.h`` correctly, since XGBoost4J relies on JNI to implement the interaction between the JVM and native libraries.
@@ -25,27 +25,27 @@ supported.
* Pass arrays to DMatrix constructor to load from sparse matrix.
Suppose we have a sparse matrix
.. code-block:: none
1 0 2 0
4 0 0 3
3 1 2 0
We can express the sparse matrix in `Compressed Sparse Row (CSR) <https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_row_(CSR,_CRS_or_Yale_format)>`_ format:
.. code-block:: java
long[] rowHeaders = new long[] {0,2,4,7};
float[] data = new float[] {1f,2f,4f,3f,3f,1f,2f};
int[] colIndex = new int[] {0,2,0,3,0,1,2};
int numColumn = 4;
DMatrix dmat = new DMatrix(rowHeaders, colIndex, data, DMatrix.SparseType.CSR, numColumn);
... or in `Compressed Sparse Column (CSC) <https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_(CSC_or_CCS)>`_ format:
.. code-block:: java
long[] colHeaders = new long[] {0,3,4,6,7};
float[] data = new float[] {1f,4f,3f,1f,2f,2f,3f};
int[] rowIndex = new int[] {0,1,2,2,0,2,1};
@@ -88,6 +88,7 @@ To set parameters, parameters are specified as a Map:
{
put("eta", 1.0);
put("max_depth", 2);
put("silent", 1);
put("objective", "binary:logistic");
put("eval_metric", "logloss");
}
@@ -156,3 +157,4 @@ After training and loading a model, you can use it to make prediction for other
float[][] predicts = booster.predict(dtest);
// predict leaf
float[][] leafPredicts = booster.predictLeaf(dtest, 0);
@@ -1,5 +1,5 @@
#######################################
XGBoost4J-Spark Tutorial (version 0.9+)
XGBoost4J-Spark Tutorial (version 0.8+)
#######################################
**XGBoost4J-Spark** is a project aiming to seamlessly integrate XGBoost and Apache Spark by fitting XGBoost to Apache Spark's MLLIB framework. With the integration, users can not only use the high-performance algorithm implementation of XGBoost, but also leverage the powerful data processing engine of Spark for:
@@ -139,7 +139,7 @@ we drop the column "class" and only keeps the feature columns and the transforme
The ``fit`` and ``transform`` are two key operations in MLLIB. Basically, ``fit`` produces a "transformer", e.g. StringIndexer, and each transformer applies the ``transform`` method on a DataFrame to add new column(s) containing transformed features/labels or prediction results, etc. To understand more about ``fit`` and ``transform``, you can find more details `here <http://spark.apache.org/docs/latest/ml-pipeline.html#pipeline-components>`_.
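As a quick illustration of the ``fit``/``transform`` pattern (sketched in PySpark for brevity; the tutorial's own examples use Scala, and the toy data here is hypothetical):

.. code-block:: python

    from pyspark.sql import SparkSession
    from pyspark.ml.feature import StringIndexer

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame(
        [("setosa",), ("virginica",), ("setosa",)], ["class"])

    indexer = StringIndexer(inputCol="class", outputCol="classIndex")
    model = indexer.fit(df)        # ``fit`` produces a transformer
    model.transform(df).show()     # ``transform`` adds the new column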
Similarly, we can use another transformer, `VectorAssembler <https://spark.apache.org/docs/2.4.0/api/java/org/apache/spark/ml/feature/VectorAssembler.html>`_, to assemble feature columns "sepal length", "sepal width", "petal length" and "petal width" as a vector.
Similarly, we can use another transformer, `VectorAssembler <https://spark.apache.org/docs/2.3.1/api/scala/index.html#org.apache.spark.ml.feature.VectorAssembler>`_, to assemble feature columns "sepal length", "sepal width", "petal length" and "petal width" as a vector.
.. code-block:: scala
@@ -154,11 +154,26 @@ Now, we have a DataFrame containing only two columns, "features" which contains
labels. A DataFrame like this (containing vector-represented features and numeric labels) can be fed to XGBoost4J-Spark's training engine directly.
Dealing with missing values
~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~
XGBoost supports missing values by default (`as described here <https://xgboost.readthedocs.io/en/latest/faq.html#how-to-deal-with-missing-value>`_).
If given a SparseVector, XGBoost will treat any values absent from the SparseVector as missing. You can also
tell XGBoost to treat a specific value in your Dataset as if it were a missing value. By default XGBoost will treat NaN as the value representing missing.
Strategies to handle missing values (and therefore overcome issues as above):
In the case that a feature column contains missing values for any reason (business logic, a faulty data ingestion process, etc.), the user should decide on a strategy for handling them.
The choice of approach depends on the value representing 'missing', which falls into one of four categories:
1. 0
2. NaN
3. Null
4. Any other value which is not mentioned in (1) / (2) / (3)
We introduce the following approaches for dealing with missing values, and the scenarios they fit:
1. Skip VectorAssembler (using setHandleInvalid = "skip") directly. Used in (2), (3).
2. Keep it (using setHandleInvalid = "keep"), and set the "missing" parameter in XGBClassifier/XGBRegressor to the value representing missing. Used in (2) and (4).
3. Keep it (using setHandleInvalid = "keep") and transform to other irregular values. Used in (3).
4. Nothing needs to be done; used in (1).
Then, XGBoost will automatically learn the ideal direction to go when a value is missing, based on that value and the chosen strategy.
Example of setting a missing value (e.g. -999) to the "missing" parameter in XGBoostClassifier:
@@ -175,39 +190,11 @@ Example of setting a missing value (e.g. -999) to the "missing" parameter in XGB
setFeaturesCol("features").
setLabelCol("classIndex")
.. note:: Missing values with Spark's VectorAssembler
.. note:: Using 0 to represent meaningful value
If given a Dataset with enough features having a value of 0, Spark's VectorAssembler transformer class will return a
SparseVector where the absent values are meant to indicate a value of 0. This conflicts with XGBoost's default of
treating values absent from the SparseVector as missing. The model would effectively be
treating 0 as missing but not declaring that to be so, which can lead to confusion when using the trained model on
other platforms. To avoid this, XGBoost will raise an exception if it receives a SparseVector and the "missing"
parameter has not been explicitly set to 0. To work around this issue the user has three options:
Because Spark's VectorAssembler transformer only treats 0 as a missing value, this creates a problem when the user has 0 as a meaningful value and there are enough 0's for a SparseVector to be used (however, in case the dataset is represented by a DenseVector, the 0 is kept)
1. Explicitly convert the Vector returned from VectorAssembler to a DenseVector to return the zeros to the dataset. If
doing this with missing values encoded as NaN, you will want to set ``setHandleInvalid = "keep"`` on VectorAssembler
in order to keep the NaN values in the dataset. You would then set the "missing" parameter to whatever you want to be
treated as missing. However this may cause a large amount of memory use if your dataset is very sparse.
2. Before calling VectorAssembler you can transform the values you want to represent missing into an irregular value
that is not 0, NaN, or Null and set the "missing" parameter to 0. The irregular value should ideally be chosen to be
outside the range of values that your features have.
3. Do not use the VectorAssembler class and instead use a custom way of constructing a SparseVector that allows for
specifying sparsity to indicate a non-zero value. You can then set the "missing" parameter to whatever sparsity
indicates in your Dataset. If this approach is taken you can pass the parameter
``"allow_non_zero_for_missing_value" -> true`` to bypass XGBoost's assertion that "missing" must be zero when given a
SparseVector.
Option 1 is recommended if memory constraints are not an issue. Option 3 requires more work to get set up but is
guaranteed to give you correct results, while option 2 will be quicker to set up but it may be difficult to find a good
irregular value that does not conflict with your feature values.
.. note:: Using a non-default missing value when using other bindings of XGBoost.
When XGBoost is saved in native format only the booster itself is saved, the value of the missing parameter is not
saved alongside the model. Thus, if a non-default missing parameter is used to train the model in Spark the user should
take care to use the same missing parameter when using the saved model in another binding.
In this case, users are also supposed to transform 0 to some other values to avoid the issue.
Training
========
@@ -254,10 +241,10 @@ Early stopping is a feature to prevent the unnecessary training iterations. By s
When it comes to custom eval metrics, in addition to ``num_early_stopping_rounds``, you also need to define ``maximize_evaluation_metrics`` or call ``setMaximizeEvaluationMetrics`` to specify whether you want to maximize or minimize the metrics in training. For built-in eval metrics, XGBoost4J-Spark will automatically select the direction.
For example, suppose we need to maximize the evaluation metrics (set ``maximize_evaluation_metrics`` with true), we set ``num_early_stopping_rounds`` with 5, and the evaluation metric of the 10th iteration is the best one so far. If none of the following iterations produces an evaluation metric greater than the 10th iteration's (the best one), training would be early-stopped at the 15th iteration.
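For comparison, the Python package exposes the same mechanism through the ``early_stopping_rounds`` argument of ``xgboost.train``. A minimal sketch on synthetic data (this is the Python API, not the XGBoost4J-Spark one):

.. code-block:: python

   import numpy as np
   import xgboost as xgb

   X, y = np.random.rand(200, 4), np.random.randint(2, size=200)
   dtrain = xgb.DMatrix(X[:150], label=y[:150])
   dvalid = xgb.DMatrix(X[150:], label=y[150:])

   # Stop if the validation AUC has not improved for 5 consecutive rounds.
   bst = xgb.train(
       {'objective': 'binary:logistic', 'eval_metric': 'auc'},
       dtrain,
       num_boost_round=100,
       evals=[(dvalid, 'validation')],
       early_stopping_rounds=5,
   )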
Training with Evaluation Sets
-----------------------------
----------------
You can also monitor the performance of the model during training with multiple evaluation datasets. By specifying ``eval_sets`` or calling ``setEvalSets`` on a XGBoostClassifier or XGBoostRegressor, you can pass in multiple evaluation datasets typed as a Map from String to DataFrame.
@@ -1,436 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"gbtree": {
"type": "object",
"properties": {
"name": {
"const": "gbtree"
},
"model": {
"type": "object",
"properties": {
"gbtree_model_param": {
"$ref": "#/definitions/gbtree_model_param"
},
"trees": {
"type": "array",
"items": {
"type": "object",
"properties": {
"tree_param": {
"type": "object",
"properties": {
"num_nodes": {
"type": "string"
},
"size_leaf_vector": {
"type": "string"
},
"num_feature": {
"type": "string"
}
},
"required": [
"num_nodes",
"num_feature",
"size_leaf_vector"
]
},
"id": {
"type": "integer"
},
"loss_changes": {
"type": "array",
"items": {
"type": "number"
}
},
"sum_hessian": {
"type": "array",
"items": {
"type": "number"
}
},
"base_weights": {
"type": "array",
"items": {
"type": "number"
}
},
"leaf_child_counts": {
"type": "array",
"items": {
"type": "integer"
}
},
"left_children": {
"type": "array",
"items": {
"type": "integer"
}
},
"right_children": {
"type": "array",
"items": {
"type": "integer"
}
},
"parents": {
"type": "array",
"items": {
"type": "integer"
}
},
"split_indices": {
"type": "array",
"items": {
"type": "integer"
}
},
"split_conditions": {
"type": "array",
"items": {
"type": "number"
}
},
"default_left": {
"type": "array",
"items": {
"type": "boolean"
}
}
},
"required": [
"tree_param",
"loss_changes",
"sum_hessian",
"base_weights",
"leaf_child_counts",
"left_children",
"right_children",
"parents",
"split_indices",
"split_conditions",
"default_left"
]
}
},
"tree_info": {
"type": "array",
"items": {
"type": "integer"
}
}
},
"required": [
"gbtree_model_param",
"trees",
"tree_info"
]
}
},
"required": [
"name",
"model"
]
},
"gbtree_model_param": {
"type": "object",
"properties": {
"num_trees": {
"type": "string"
},
"size_leaf_vector": {
"type": "string"
}
},
"required": [
"num_trees",
"size_leaf_vector"
]
},
"tree_param": {
"type": "object",
"properties": {
"num_nodes": {
"type": "string"
},
"size_leaf_vector": {
"type": "string"
},
"num_feature": {
"type": "string"
}
},
"required": [
"num_nodes",
"num_feature",
"size_leaf_vector"
]
},
"reg_loss_param": {
"type": "object",
"properties": {
"scale_pos_weight": {
"type": "string"
}
}
},
"softmax_multiclass_param": {
"type": "object",
"properties": {
"num_class": { "type": "string" }
}
},
"lambda_rank_param": {
"type": "object",
"properties": {
"num_pairsample": { "type": "string" },
"fix_list_weight": { "type": "string" }
}
}
},
"type": "object",
"properties": {
"version": {
"type": "array",
"const": [
1,
0,
0
],
"additionalItems": false
},
"learner": {
"type": "object",
"properties": {
"gradient_booster": {
"oneOf": [
{
"$ref": "#/definitions/gbtree"
},
{
"type": "object",
"properties": {
"name": { "const": "gblinear" },
"model": {
"type": "object",
"properties": {
"weights": {
"type": "array",
"items": {
"type": "number"
}
}
}
}
}
},
{
"type": "object",
"properties": {
"name": { "const": "dart" },
"gbtree": {
"$ref": "#/definitions/gbtree"
},
"weight_drop": {
"type": "array",
"items": {
"type": "number"
}
}
},
"required": [
"name",
"gbtree",
"weight_drop"
]
}
]
},
"objective": {
"oneOf": [
{
"type": "object",
"properties": {
"name": { "const": "reg:squarederror" },
"reg_loss_param": { "$ref": "#/definitions/reg_loss_param"}
},
"required": [
"name",
"reg_loss_param"
]
},
{
"type": "object",
"properties": {
"name": { "const": "reg:squaredlogerror" },
"reg_loss_param": { "$ref": "#/definitions/reg_loss_param"}
},
"required": [
"name",
"reg_loss_param"
]
},
{
"type": "object",
"properties": {
"name": { "const": "reg:logistic" },
"reg_loss_param": { "$ref": "#/definitions/reg_loss_param"}
},
"required": [
"name",
"reg_loss_param"
]
},
{
"type": "object",
"properties": {
"name": { "const": "binary:logistic" },
"reg_loss_param": { "$ref": "#/definitions/reg_loss_param"}
},
"required": [
"name",
"reg_loss_param"
]
},
{
"type": "object",
"properties": {
"name": { "const": "binary:logitraw" },
"reg_loss_param": { "$ref": "#/definitions/reg_loss_param"}
},
"required": [
"name",
"reg_loss_param"
]
},
{
"type": "object",
"properties": {
"name": { "const": "count:poisson" },
"poisson_regression_param": {
"type": "object",
"properties": {
"max_delta_step": { "type": "string" }
}
}
},
"required": [
"name",
"poisson_regression_param"
]
},
{
"type": "object",
"properties": {
"name": { "const": "reg:tweedie" },
"tweedie_regression_param": {
"type": "object",
"properties": {
"tweedie_variance_power": { "type": "string" }
}
}
},
"required": [
"name",
"tweedie_regression_param"
]
},
{
"type": "object",
"properties": {
"name": { "const": "survival:cox" }
},
"required": [ "name" ]
},
{
"type": "object",
"properties": {
"name": { "const": "reg:gamma" }
},
"required": [ "name" ]
},
{
"type": "object",
"properties": {
"name": { "const": "multi:softprob" },
"softmax_multiclass_param": { "$ref": "#/definitions/softmax_multiclass_param"}
},
"required": [
"name",
"softmax_multiclass_param"
]
},
{
"type": "object",
"properties": {
"name": { "const": "multi:softmax" },
"softmax_multiclass_param": { "$ref": "#/definitions/softmax_multiclass_param"}
},
"required": [
"name",
"softmax_multiclass_param"
]
},
{
"type": "object",
"properties": {
"name": { "const": "rank:pairwise" },
"lambda_rank_param": { "$ref": "#/definitions/lambda_rank_param"}
},
"required": [
"name",
"lambda_rank_param"
]
},
{
"type": "object",
"properties": {
"name": { "const": "rank:ndcg" },
"lambda_rank_param": { "$ref": "#/definitions/lambda_rank_param"}
},
"required": [
"name",
"lambda_rank_param"
]
},
{
"type": "object",
"properties": {
"name": { "const": "rank:map" },
"lambda_rank_param": { "$ref": "#/definitions/lambda_rank_param"}
},
"required": [
"name",
"lambda_rank_param"
]
}
]
},
"learner_model_param": {
"type": "object",
"properties": {
"base_score": { "type": "string" },
"num_class": { "type": "string" },
"num_feature": { "type": "string" }
}
}
},
"required": [
"gradient_booster",
"objective"
]
}
},
"required": [
"version",
"learner"
]
}
@@ -29,16 +29,10 @@ General Parameters
* ``verbosity`` [default=1]
- Verbosity of printing messages. Valid values are 0 (silent), 1 (warning), 2 (info), 3
(debug). Sometimes XGBoost tries to change configurations based on heuristics, which
is displayed as a warning message. If there's unexpected behaviour, please try to
increase the value of verbosity.
* ``validate_parameters`` [default to false, except for Python ``train`` function]
- When set to True, XGBoost will perform validation of input parameters to check whether
a parameter is used or not. The feature is still experimental. It's expected to have
some false positives, especially when used with the Scikit-Learn interface.
* ``nthread`` [default to maximum number of threads available if not set]
@@ -89,12 +83,17 @@ Parameters for Tree Booster
- range: (0,1]
* ``colsample_bytree``, ``colsample_bylevel``, ``colsample_bynode`` [default=1]
- This is a family of parameters for subsampling of columns.
- All ``colsample_by*`` parameters have a range of (0, 1], the default value of 1, and specify the fraction of columns to be subsampled.
- ``colsample_bytree`` is the subsample ratio of columns when constructing each tree. Subsampling occurs once for every tree constructed.
- ``colsample_bylevel`` is the subsample ratio of columns for each level. Subsampling occurs once for every new depth level reached in a tree. Columns are subsampled from the set of columns chosen for the current tree.
- ``colsample_bynode`` is the subsample ratio of columns for each node (split). Subsampling occurs once every time a new split is evaluated. Columns are subsampled from the set of columns chosen for the current level.
- ``colsample_by*`` parameters work cumulatively. For instance,
the combination ``{'colsample_bytree':0.5, 'colsample_bylevel':0.5,
'colsample_bynode':0.5}`` with 64 features will leave 8 features to choose from at
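To make the cumulative behaviour concrete, here is a minimal Python sketch on synthetic data with 64 features, matching the example above:

.. code-block:: python

   import numpy as np
   import xgboost as xgb

   X = np.random.rand(100, 64)            # 64 features
   y = np.random.randint(2, size=100)
   dtrain = xgb.DMatrix(X, label=y)

   params = {
       'objective': 'binary:logistic',
       'colsample_bytree': 0.5,    # 64 -> 32 columns per tree
       'colsample_bylevel': 0.5,   # 32 -> 16 columns per level
       'colsample_bynode': 0.5,    # 16 -> 8 columns per split
   }
   bst = xgb.train(params, dtrain, num_boost_round=10)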
@@ -111,25 +110,20 @@ Parameters for Tree Booster
* ``tree_method`` string [default= ``auto``]
- The tree construction algorithm used in XGBoost. See description in the `reference paper <http://arxiv.org/abs/1603.02754>`_.
- XGBoost supports ``approx``, ``hist`` and ``gpu_hist`` for distributed training. Experimental support for external memory is available for ``approx`` and ``gpu_hist``.
- Choices: ``auto``, ``exact``, ``approx``, ``hist``, ``gpu_hist``. This is a
combination of commonly used updaters. For other updaters like ``refresh``, set the
parameter ``updater`` directly.
- XGBoost supports ``hist`` and ``approx`` for distributed training and only supports ``approx`` for the external memory version.
- Choices: ``auto``, ``exact``, ``approx``, ``hist``, ``gpu_exact``, ``gpu_hist``
- ``auto``: Use heuristic to choose the fastest method.
- For small dataset, exact greedy (``exact``) will be used.
- For larger datasets, the approximate algorithm (``approx``) will be chosen. It's
recommended to try ``hist`` and ``gpu_hist`` for higher performance with large
datasets. (``gpu_hist``) has support for ``external memory``.
- For small to medium datasets, exact greedy (``exact``) will be used.
- For very large datasets, the approximate algorithm (``approx``) will be chosen.
- Because the old behavior is to always use exact greedy in single machine mode, the
user will get a message when the approximate algorithm is chosen to notify them of this choice.
- ``exact``: Exact greedy algorithm. Enumerates all split candidates.
- ``exact``: Exact greedy algorithm.
- ``approx``: Approximate greedy algorithm using quantile sketch and gradient histogram.
- ``hist``: Faster histogram optimized approximate greedy algorithm.
- ``hist``: Fast histogram optimized approximate greedy algorithm. It uses some performance improvements such as bins caching.
- ``gpu_exact``: GPU implementation of ``exact`` algorithm.
- ``gpu_hist``: GPU implementation of ``hist`` algorithm.
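For instance, a minimal Python sketch that selects the histogram-based method explicitly (synthetic data):

.. code-block:: python

   import numpy as np
   import xgboost as xgb

   dtrain = xgb.DMatrix(np.random.rand(100, 10),
                        label=np.random.randint(2, size=100))
   # 'hist' buckets continuous features into discrete bins (see max_bin below).
   params = {'tree_method': 'hist', 'objective': 'binary:logistic'}
   bst = xgb.train(params, dtrain, num_boost_round=10)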
* ``sketch_eps`` [default=0.03]
@@ -147,24 +141,22 @@ Parameters for Tree Booster
* ``updater`` [default= ``grow_colmaker,prune``]
- A comma separated string defining the sequence of tree updaters to run, providing a modular way to construct and to modify the trees. This is an advanced parameter that is usually set automatically, depending on some other parameters. However, it could be also set explicitly by a user. The following updaters exist:
- A comma separated string defining the sequence of tree updaters to run, providing a modular way to construct and to modify the trees. This is an advanced parameter that is usually set automatically, depending on some other parameters. However, it could be also set explicitly by a user. The following updater plugins exist:
- ``grow_colmaker``: non-distributed column-based construction of trees.
- ``distcol``: distributed tree construction with column-based data splitting mode.
- ``grow_histmaker``: distributed tree construction with row-based data splitting based on global proposal of histogram counting.
- ``grow_local_histmaker``: based on local histogram counting.
- ``grow_skmaker``: uses the approximate sketching algorithm.
- ``grow_quantile_histmaker``: Grow tree using quantized histogram.
- ``grow_gpu_hist``: Grow tree with GPU.
- ``sync``: synchronizes trees in all distributed nodes.
- ``refresh``: refreshes tree's statistics and/or leaf values based on the current data. Note that no random subsampling of data rows is performed.
- ``prune``: prunes the splits where loss < min_split_loss (or gamma).
- In a distributed setting, the implicit updater sequence value would be adjusted to ``grow_histmaker,prune`` by default, and you can set ``tree_method`` as ``hist`` to use ``grow_histmaker``.
* ``refresh_leaf`` [default=1]
- This is a parameter of the ``refresh`` updater. When this flag is 1, tree leaves as well as tree nodes' stats are updated. When it is 0, only node stats are updated.
- This is a parameter of the ``refresh`` updater plugin. When this flag is 1, tree leaves as well as tree nodes' stats are updated. When it is 0, only node stats are updated.
* ``process_type`` [default= ``default``]
@@ -172,7 +164,7 @@ Parameters for Tree Booster
- Choices: ``default``, ``update``
- ``default``: The normal boosting process which creates new trees.
- ``update``: Starts from an existing model and only updates its trees. In each boosting iteration, a tree from the initial model is taken, a specified sequence of updaters is run for that tree, and a modified tree is added to the new model. The new model would have either the same or a smaller number of trees, depending on the number of boosting iterations performed. Currently, the following built-in updaters could be meaningfully used with this process type: ``refresh``, ``prune``. With ``process_type=update``, one cannot use updaters that create new trees.
- ``update``: Starts from an existing model and only updates its trees. In each boosting iteration, a tree from the initial model is taken, a specified sequence of updater plugins is run for that tree, and a modified tree is added to the new model. The new model would have either the same or a smaller number of trees, depending on the number of boosting iterations performed. Currently, the following built-in updater plugins could be meaningfully used with this process type: ``refresh``, ``prune``. With ``process_type=update``, one cannot use updater plugins that create new trees.
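A minimal Python sketch of the ``update`` process type (synthetic data; the update pass should not request more rounds than there are trees in the initial model):

.. code-block:: python

   import numpy as np
   import xgboost as xgb

   dtrain = xgb.DMatrix(np.random.rand(100, 10), label=np.random.rand(100))
   # Train an initial model with 20 trees.
   bst = xgb.train({'objective': 'reg:squarederror'}, dtrain, num_boost_round=20)

   # Re-run the 'refresh' updater over those 20 trees; no new trees are created.
   refreshed = xgb.train(
       {'process_type': 'update', 'updater': 'refresh', 'refresh_leaf': 1},
       dtrain, num_boost_round=20, xgb_model=bst)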
* ``grow_policy`` [default= ``depthwise``]
@@ -193,32 +185,16 @@ Parameters for Tree Booster
- Maximum number of discrete bins to bucket continuous features.
- Increasing this number improves the optimality of splits at the cost of higher computation time.
* ``predictor``, [default=``auto``]
* ``predictor``, [default=``cpu_predictor``]
- The type of predictor algorithm to use. Provides the same results but allows the use of GPU or CPU.
- ``auto``: Configure predictor based on heuristics.
- ``cpu_predictor``: Multicore CPU prediction algorithm.
- ``gpu_predictor``: Prediction using GPU. Used when ``tree_method`` is ``gpu_hist``.
When ``predictor`` is set to the default value ``auto``, the ``gpu_hist`` tree method is
able to provide GPU-based prediction without copying training data to GPU memory.
If ``gpu_predictor`` is explicitly specified, then all data is copied into GPU memory;
this is only recommended for performing prediction tasks.
- ``gpu_predictor``: Prediction using GPU. Default when ``tree_method`` is ``gpu_exact`` or ``gpu_hist``.
* ``num_parallel_tree``, [default=1]
- Number of parallel trees constructed during each iteration. This option is used to support boosted random forest.
* ``monotone_constraints``
- Constraint of variable monotonicity. See the tutorial for more information.
* ``interaction_constraints``
- Constraints for interaction representing permitted interactions. The constraints must
be specified in the form of a nested list, e.g. ``[[0, 1], [2, 3, 4]]``, where each inner
list is a group of indices of features that are allowed to interact with each other.
See the tutorial for more information.
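A minimal Python sketch passing both constraint types through the parameter dict (five synthetic features; the string forms follow the respective tutorials):

.. code-block:: python

   import numpy as np
   import xgboost as xgb

   dtrain = xgb.DMatrix(np.random.rand(100, 5), label=np.random.rand(100))
   params = {
       # Increasing in feature 0, decreasing in feature 1, unconstrained otherwise.
       'monotone_constraints': '(1,-1,0,0,0)',
       # Features 0 and 1 may interact with each other; likewise features 2, 3 and 4.
       'interaction_constraints': '[[0, 1], [2, 3, 4]]',
       'tree_method': 'hist',
   }
   bst = xgb.train(params, dtrain, num_boost_round=10)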
Additional parameters for Dart Booster (``booster=dart``)
=========================================================
@@ -319,8 +295,7 @@ Specify the learning task and the corresponding learning objective. The objectiv
* ``objective`` [default=reg:squarederror]
- ``reg:squarederror``: regression with squared loss.
- ``reg:squaredlogerror``: regression with squared log loss :math:`\frac{1}{2}[\log(pred + 1) - \log(label + 1)]^2`. All input labels are required to be greater than -1. Also, see metric ``rmsle`` for a possible issue with this objective.
- ``reg:logistic``: logistic regression
- ``binary:logistic``: logistic regression for binary classification, output probability
- ``binary:logitraw``: logistic regression for binary classification, output score before logistic transformation
@@ -351,7 +326,6 @@ Specify the learning task and the corresponding learning objective. The objectiv
- The choices are listed below:
- ``rmse``: `root mean square error <http://en.wikipedia.org/wiki/Root_mean_square_error>`_
- ``rmsle``: root mean square log error: :math:`\sqrt{\frac{1}{N}\sum_{i=1}^{N}[\log(pred_i + 1) - \log(label_i + 1)]^2}`. Default metric of the ``reg:squaredlogerror`` objective. This metric reduces errors generated by outliers in the dataset. But because the ``log`` function is employed, ``rmsle`` might output ``nan`` when the prediction value is less than -1. See ``reg:squaredlogerror`` for other requirements. (A short numpy sketch follows after this list.)
- ``mae``: `mean absolute error <https://en.wikipedia.org/wiki/Mean_absolute_error>`_
- ``logloss``: `negative log-likelihood <http://en.wikipedia.org/wiki/Log-likelihood>`_
- ``error``: Binary classification error rate. It is calculated as ``#(wrong cases)/#(all cases)``. For the predictions, the evaluation will regard the instances with prediction value larger than 0.5 as positive instances, and the others as negative instances.
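As promised above, a short numpy sketch of the ``rmsle`` computation as defined (``np.log1p(x)`` computes ``log(x + 1)``):

.. code-block:: python

   import numpy as np

   def rmsle(pred, label):
       # sqrt(mean((log(pred + 1) - log(label + 1))^2))
       return np.sqrt(np.mean(np.square(np.log1p(pred) - np.log1p(label))))

   print(rmsle(np.array([0.5, 2.0]), np.array([1.0, 2.0])))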
@@ -372,7 +346,7 @@ Specify the learning task and the corresponding learning objective. The objectiv
* ``seed`` [default=0]
- Random number seed. This parameter is ignored in the R package; use `set.seed()` instead.
- Random number seed.
***********************
Command Line Parameters
@@ -1,79 +0,0 @@
'''This is a simple script that converts a pickled XGBoost
Scikit-Learn interface object from 0.90 to a native model. Pickle
format is not stable as it's a direct serialization of a Python object.
We advise not to use it when stability is needed.
'''
import pickle
import json
import os
import argparse
import numpy as np
import xgboost
import warnings
def save_label_encoder(le):
'''Save the label encoder in XGBClassifier'''
meta = dict()
for k, v in le.__dict__.items():
if isinstance(v, np.ndarray):
meta[k] = v.tolist()
else:
meta[k] = v
return meta
def xgboost_skl_90to100(skl_model):
'''Extract the model and related metadata in SKL model.'''
model = {}
with open(skl_model, 'rb') as fd:
old = pickle.load(fd)
if not isinstance(old, xgboost.XGBModel):
raise TypeError(
'The script only handles Scikit-Learn interface objects')
# Save Scikit-Learn specific Python attributes into a JSON document.
for k, v in old.__dict__.items():
if k == '_le':
model[k] = save_label_encoder(v)
elif k == 'classes_':
model[k] = v.tolist()
elif k == '_Booster':
continue
else:
try:
json.dumps({k: v})
model[k] = v
except TypeError:
warnings.warn(str(k) + ' is not saved in Scikit-Learn meta.')
booster = old.get_booster()
# Store the JSON serialization as an attribute
booster.set_attr(scikit_learn=json.dumps(model))
# Save it into a native model.
i = 0
while True:
path = 'xgboost_native_model_from_' + skl_model + '-' + str(i) + '.bin'
if os.path.exists(path):
i += 1
continue
booster.save_model(path)
break
if __name__ == '__main__':
assert xgboost.__version__ != '1.0.0', ('Please use the XGBoost version'
' that generated this pickle.')
parser = argparse.ArgumentParser(
description=('A simple script to convert pickle generated by'
' XGBoost 0.90 to XGBoost 1.0.0 model (not pickle).'))
parser.add_argument(
'--old-pickle',
type=str,
help='Path to old pickle file of Scikit-Learn interface object. '
'Will output a native model converted from this pickle file',
required=True)
args = parser.parse_args()
xgboost_skl_90to100(args.old_pickle)
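Following the naming scheme in the script, the resulting native model can then be loaded under XGBoost 1.0.0; a sketch (the path shown is illustrative):

.. code-block:: python

   import xgboost

   # The script writes 'xgboost_native_model_from_<old-pickle>-<i>.bin'.
   bst = xgboost.Booster(model_file='xgboost_native_model_from_model.pkl-0.bin')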
@@ -43,14 +43,6 @@ Scikit-Learn API
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: xgboost.XGBRFRegressor
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: xgboost.XGBRFClassifier
:members:
:inherited-members:
:show-inheritance:
Plotting API
------------
@@ -73,19 +65,3 @@ Callback API
.. autofunction:: xgboost.callback.reset_learning_rate
.. autofunction:: xgboost.callback.early_stop
.. _dask_api:
Dask API
--------
.. automodule:: xgboost.dask
.. autofunction:: xgboost.dask.DaskDMatrix
.. autofunction:: xgboost.dask.train
.. autofunction:: xgboost.dask.predict
.. autofunction:: xgboost.dask.DaskXGBClassifier
.. autofunction:: xgboost.dask.DaskXGBRegressor
@@ -18,8 +18,6 @@ To verify your installation, run the following in Python:
import xgboost as xgb
.. _python_data_interface:
Data Interface
--------------
The XGBoost python module is able to load data from:
@@ -28,7 +26,6 @@ The XGBoost python module is able to load data from:
- Comma-separated values (CSV) file
- NumPy 2D array
- SciPy 2D sparse array
- cuDF DataFrame
- Pandas data frame, and
- XGBoost binary buffer file.
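For example, a minimal sketch constructing a :py:class:`DMatrix <xgboost.DMatrix>` from a NumPy 2D array of random data:

.. code-block:: python

   import numpy as np
   import xgboost as xgb

   data = np.random.rand(5, 10)          # 5 entities, each with 10 features
   label = np.random.randint(2, size=5)  # binary target
   dtrain = xgb.DMatrix(data, label=label)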
@@ -53,8 +50,8 @@ The data is stored in a :py:class:`DMatrix <xgboost.DMatrix>` object.
.. note:: Categorical features not supported
Note that XGBoost does not provide specialization for categorical features; if your data contains
categorical features, load it as a NumPy array first and then perform corresponding preprocessing steps like
Note that XGBoost does not support categorical features; if your data contains
categorical features, load it as a NumPy array first and then perform
`one-hot encoding <http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html>`_.
.. note:: Use Pandas to load CSV files with headers
@@ -116,7 +113,7 @@ XGBoost can use either a list of pairs or a dictionary to set :doc:`parameters <
.. code-block:: python
param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
param = {'max_depth': 2, 'eta': 1, 'silent': 1, 'objective': 'binary:logistic'}
param['nthread'] = 4
param['eval_metric'] = 'auc'
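With the parameters in place, training takes the parameter set, the training DMatrix and a watchlist; a minimal self-contained sketch (random data stands in for real ``dtrain``/``dtest``):

.. code-block:: python

   import numpy as np
   import xgboost as xgb

   dtrain = xgb.DMatrix(np.random.rand(100, 10),
                        label=np.random.randint(2, size=100))
   dtest = xgb.DMatrix(np.random.rand(50, 10),
                       label=np.random.randint(2, size=50))

   param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic',
            'nthread': 4, 'eval_metric': 'auc'}
   evallist = [(dtest, 'eval'), (dtrain, 'train')]
   bst = xgb.train(param, dtrain, num_boost_round=10, evals=evallist)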
@@ -1,4 +1,4 @@
sphinx>=2.1
sphinx
mock
guzzle_sphinx_theme
breathe
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
"""Helper utility function for customization."""
"""Helper utilty function for customization."""
import sys
import os
import docutils