Compare commits


2 Commits

Author             SHA1         Message                                    Date
Jiaming Yuan       4b39590c14   Document GPU objectives in NEWS. (#3866)   2018-11-05 16:26:28 +13:00
Philip Hyunsu Cho  9a4d0b078f   Add another contributor for rabit update   2018-11-04 10:28:09 -08:00
585 changed files with 18763 additions and 47297 deletions

.clang-tidy

@@ -1,4 +1,4 @@
Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
Checks: 'modernize-*,-modernize-make-*,-modernize-raw-string-literal,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
CheckOptions:
- { key: readability-identifier-naming.ClassCase, value: CamelCase }
- { key: readability-identifier-naming.StructCase, value: CamelCase }

.gitignore vendored (18 changed lines)

@@ -17,7 +17,7 @@
*.tar.gz
*conf
*buffer
*.model
*model
*pyc
*.train
*.test
@@ -69,8 +69,10 @@ config.mk
/xgboost
*.data
build_plugin
.idea
recommonmark/
tags
*.iml
*.class
target
*.swp
@@ -88,16 +90,4 @@ lib/
# spark
metastore_db
/include/xgboost/build_config.h
# files from R-package source install
**/config.status
R-package/src/Makevars
# Visual Studio Code
/.vscode/
# IntelliJ/CLion
.idea
*.iml
/cmake-build-debug/
plugin/updater_gpu/test/cpp/data

.travis.yml

@@ -1,51 +1,77 @@
# disable sudo for container build.
sudo: required
# Enabling test OS X
# Enabling test on Linux and OS X
os:
- linux
- osx
osx_image: xcode10.3
dist: bionic
osx_image: xcode8
group: deprecated-2017Q4
# Use Build Matrix to do lint and build separately
env:
matrix:
# code lint
- TASK=lint
# r package test
- TASK=r_test
# python package test
- TASK=python_test
# test installation of Python source distribution
- TASK=python_sdist_test
- TASK=python_lightweight_test
# java package test
- TASK=java_test
# cmake test
- TASK=cmake_test
# c++ test
- TASK=cpp_test
# distributed test
- TASK=distributed_test
# address sanitizer test
- TASK=sanitizer_test
matrix:
exclude:
- os: linux
env: TASK=python_test
- os: linux
env: TASK=java_test
- os: linux
- os: osx
env: TASK=lint
- os: osx
env: TASK=cmake_test
- os: linux
env: TASK=r_test
- os: osx
env: TASK=python_lightweight_test
- os: osx
env: TASK=cpp_test
- os: osx
env: TASK=distributed_test
- os: osx
env: TASK=sanitizer_test
# dependent brew packages
# dependent apt packages
addons:
homebrew:
apt:
sources:
- llvm-toolchain-trusty-5.0
- ubuntu-toolchain-r-test
- george-edison55-precise-backports
packages:
- cmake
- libomp
- graphviz
- openssl
- libgit2
- clang
- clang-tidy-5.0
- cmake-data
- doxygen
- wget
- r
update: true
- libcurl4-openssl-dev
- unzip
- graphviz
- gcc-4.8
- g++-4.8
- gcc-7
- g++-7
before_install:
- source dmlc-core/scripts/travis/travis_setup_env.sh
- if [ "${TASK}" != "python_sdist_test" ]; then export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package; fi
- export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package
- echo "MAVEN_OPTS='-Xmx2g -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m -Dorg.slf4j.simpleLogger.defaultLogLevel=error'" > ~/.mavenrc
install:

CMakeLists.txt

@@ -1,247 +1,264 @@
cmake_minimum_required(VERSION 3.12)
project(xgboost LANGUAGES CXX C VERSION 1.0.2)
cmake_minimum_required (VERSION 3.2)
project(xgboost)
include(cmake/Utils.cmake)
list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
cmake_policy(SET CMP0022 NEW)
list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/modules")
find_package(OpenMP)
if ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
cmake_policy(SET CMP0077 NEW)
endif ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
message(STATUS "CMake version ${CMAKE_VERSION}")
if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
message(FATAL_ERROR "GCC version must be at least 5.0!")
endif()
include(${xgboost_SOURCE_DIR}/cmake/FindPrefetchIntrinsics.cmake)
find_prefetch_intrinsics()
include(${xgboost_SOURCE_DIR}/cmake/Version.cmake)
write_version()
set_default_configuration_release()
msvc_use_static_runtime()
#-- Options
option(BUILD_C_DOC "Build documentation for C APIs using Doxygen." OFF)
option(USE_OPENMP "Build with OpenMP support." ON)
## Bindings
# Options
option(USE_CUDA "Build with GPU acceleration")
option(JVM_BINDINGS "Build JVM bindings" OFF)
option(R_LIB "Build shared library for R package" OFF)
## Dev
option(USE_DEBUG_OUTPUT "Dump internal training results like gradients and predictions to stdout.
Should only be used for debugging." OFF)
option(GOOGLE_TEST "Build google tests" OFF)
option(USE_DMLC_GTEST "Use google tests bundled with dmlc-core submodule" OFF)
option(USE_NVTX "Build with cuda profiling annotations. Developers only." OFF)
set(NVTX_HEADER_DIR "" CACHE PATH "Path to the stand-alone nvtx header")
option(RABIT_MOCK "Build rabit with mock" OFF)
## CUDA
option(USE_CUDA "Build with GPU acceleration" OFF)
option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF)
option(R_LIB "Build shared library for R package" OFF)
set(GPU_COMPUTE_VER "" CACHE STRING
"Semicolon separated list of compute versions to be built against, e.g. '35;61'")
## Copied From dmlc
option(USE_HDFS "Build with HDFS support" OFF)
option(USE_AZURE "Build with AZURE support" OFF)
option(USE_S3 "Build with S3 support" OFF)
## Sanitizers
"Space separated list of compute versions to be built against, e.g. '35 61'")
option(USE_SANITIZER "Use sanitizer flags" OFF)
option(SANITIZER_PATH "Path to sanitizers.")
set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
"Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
address, leak, undefined and thread.")
## Plugins
address, leak and thread.")
# Plugins
option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)
#-- Checks for building XGBoost
if (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
message(SEND_ERROR "Do not enable `USE_DEBUG_OUTPUT' with release build.")
endif (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
if (USE_NCCL AND NOT (USE_CUDA))
message(SEND_ERROR "`USE_NCCL` must be enabled with `USE_CUDA` flag.")
endif (USE_NCCL AND NOT (USE_CUDA))
if (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable BUILD_WITH_SHARED_NCCL.")
endif (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
if (JVM_BINDINGS AND R_LIB)
message(SEND_ERROR "`R_LIB' is not compatible with `JVM_BINDINGS' as they both have customized configurations.")
endif (JVM_BINDINGS AND R_LIB)
if (R_LIB AND GOOGLE_TEST)
message(WARNING "Some C++ unittests will fail with `R_LIB` enabled,
as R package redirects some functions to R runtime implementation.")
endif (R_LIB AND GOOGLE_TEST)
# Deprecation warning
if(USE_AVX)
message(SEND_ERROR "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from XGBoost.")
endif (USE_AVX)
message(WARNING "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from xgboost.")
endif()
#-- Sanitizer
# Compiler flags
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
if(OpenMP_CXX_FOUND OR OPENMP_FOUND)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
endif()
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
if(MSVC)
# Multithreaded compilation
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
else()
# Correct error for GCC 5 and cuda
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_MWAITXINTRIN_H_INCLUDED -D_FORCE_INLINES")
# Performance
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -funroll-loops")
endif()
if(WIN32 AND MINGW)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-libstdc++")
endif()
# Sanitizer
if(USE_SANITIZER)
include(cmake/Sanitizer.cmake)
enable_sanitizers("${ENABLED_SANITIZERS}")
endif(USE_SANITIZER)
if (USE_CUDA)
SET(USE_OPENMP ON CACHE BOOL "CUDA requires OpenMP" FORCE)
# `export CXX=' is ignored by CMake CUDA.
set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER})
message(STATUS "Configured CUDA host compiler: ${CMAKE_CUDA_HOST_COMPILER}")
enable_language(CUDA)
set(GEN_CODE "")
format_gencode_flags("${GPU_COMPUTE_VER}" GEN_CODE)
message(STATUS "CUDA GEN_CODE: ${GEN_CODE}")
endif (USE_CUDA)
if (USE_OPENMP)
if (APPLE)
# Require CMake 3.16+ on Mac OSX, as previous versions of CMake had trouble locating
# OpenMP on Mac. See https://github.com/dmlc/xgboost/pull/5146#issuecomment-568312706
cmake_minimum_required(VERSION 3.16)
endif (APPLE)
find_package(OpenMP REQUIRED)
endif (USE_OPENMP)
# dmlc-core
msvc_use_static_runtime()
add_subdirectory(${xgboost_SOURCE_DIR}/dmlc-core)
set_target_properties(dmlc PROPERTIES
CXX_STANDARD 11
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
list(APPEND LINKED_LIBRARIES_PRIVATE dmlc)
add_subdirectory(dmlc-core)
set(LINK_LIBRARIES dmlc rabit)
# enable custom logging
add_definitions(-DDMLC_LOG_CUSTOMIZE=1)
# compiled code customizations for R package
if(R_LIB)
add_definitions(
-DXGBOOST_STRICT_R_MODE=1
-DXGBOOST_CUSTOMIZE_GLOBAL_PRNG=1
-DDMLC_LOG_BEFORE_THROW=0
-DDMLC_DISABLE_STDIN=1
-DDMLC_LOG_CUSTOMIZE=1
-DRABIT_CUSTOMIZE_MSG_
-DRABIT_STRICT_CXX98_
)
endif()
# Gather source files
include_directories (
${PROJECT_SOURCE_DIR}/include
${PROJECT_SOURCE_DIR}/dmlc-core/include
${PROJECT_SOURCE_DIR}/rabit/include
)
file(GLOB_RECURSE SOURCES
src/*.cc
src/*.h
include/*.h
)
# Only add main function for executable target
list(REMOVE_ITEM SOURCES ${PROJECT_SOURCE_DIR}/src/cli_main.cc)
file(GLOB_RECURSE TEST_SOURCES "tests/cpp/*.cc")
file(GLOB_RECURSE CUDA_SOURCES
src/*.cu
src/*.cuh
)
# Add plugins to source files
if(PLUGIN_LZ4)
list(APPEND SOURCES plugin/lz4/sparse_page_lz4_format.cc)
link_libraries(lz4)
endif()
if(PLUGIN_DENSE_PARSER)
list(APPEND SOURCES plugin/dense_parser/dense_libsvm.cc)
endif()
# rabit
set(RABIT_BUILD_DMLC OFF)
set(DMLC_ROOT ${xgboost_SOURCE_DIR}/dmlc-core)
set(RABIT_WITH_R_LIB ${R_LIB})
add_subdirectory(rabit)
if (RABIT_MOCK)
list(APPEND LINKED_LIBRARIES_PRIVATE rabit_mock_static)
# TODO: Create rabit cmakelists.txt
set(RABIT_SOURCES
rabit/src/allreduce_base.cc
rabit/src/allreduce_robust.cc
rabit/src/engine.cc
rabit/src/c_api.cc
)
set(RABIT_EMPTY_SOURCES
rabit/src/engine_empty.cc
rabit/src/c_api.cc
)
if(MINGW OR R_LIB)
# build a dummy rabit library
add_library(rabit STATIC ${RABIT_EMPTY_SOURCES})
else()
list(APPEND LINKED_LIBRARIES_PRIVATE rabit)
endif(RABIT_MOCK)
add_library(rabit STATIC ${RABIT_SOURCES})
endif()
# Exports some R specific definitions and objects
if(USE_CUDA)
find_package(CUDA 8.0 REQUIRED)
cmake_minimum_required(VERSION 3.5)
add_definitions(-DXGBOOST_USE_CUDA)
include_directories(cub)
if(USE_NCCL)
find_package(Nccl REQUIRED)
include_directories(${NCCL_INCLUDE_DIR})
add_definitions(-DXGBOOST_USE_NCCL)
endif()
set(GENCODE_FLAGS "")
format_gencode_flags("${GPU_COMPUTE_VER}" GENCODE_FLAGS)
message("cuda architecture flags: ${GENCODE_FLAGS}")
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};--expt-extended-lambda;--expt-relaxed-constexpr;${GENCODE_FLAGS};-lineinfo;")
if(NOT MSVC)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};-Xcompiler -fPIC; -Xcompiler -Werror; -std=c++11")
endif()
cuda_add_library(gpuxgboost ${CUDA_SOURCES} STATIC)
if(USE_NCCL)
link_directories(${NCCL_LIBRARY})
target_link_libraries(gpuxgboost ${NCCL_LIB_NAME})
endif()
list(APPEND LINK_LIBRARIES gpuxgboost)
endif()
# flags and sources for R-package
if(R_LIB)
add_subdirectory(${xgboost_SOURCE_DIR}/R-package)
endif (R_LIB)
file(GLOB_RECURSE R_SOURCES
R-package/src/*.h
R-package/src/*.c
R-package/src/*.cc
)
list(APPEND SOURCES ${R_SOURCES})
endif()
# core xgboost
add_subdirectory(${xgboost_SOURCE_DIR}/plugin)
add_subdirectory(${xgboost_SOURCE_DIR}/src)
set(XGBOOST_OBJ_SOURCES "${XGBOOST_OBJ_SOURCES};$<TARGET_OBJECTS:objxgboost>")
add_library(objxgboost OBJECT ${SOURCES})
#-- Shared library
add_library(xgboost SHARED ${XGBOOST_OBJ_SOURCES})
target_include_directories(xgboost
INTERFACE
$<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include>
$<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/include>)
target_link_libraries(xgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})
# This creates its own shared library `xgboost4j'.
if (JVM_BINDINGS)
add_subdirectory(${xgboost_SOURCE_DIR}/jvm-packages)
endif (JVM_BINDINGS)
#-- End shared library
#-- CLI for xgboost
add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc ${XGBOOST_OBJ_SOURCES})
target_include_directories(runxgboost
PRIVATE
${xgboost_SOURCE_DIR}/include
${xgboost_SOURCE_DIR}/dmlc-core/include
${xgboost_SOURCE_DIR}/rabit/include)
target_link_libraries(runxgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})
set_target_properties(
runxgboost PROPERTIES
OUTPUT_NAME xgboost
CXX_STANDARD 11
CXX_STANDARD_REQUIRED ON)
#-- End CLI for xgboost
set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib)
# Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
add_dependencies(xgboost runxgboost)
#-- Installing XGBoost
# building shared library for R package
if(R_LIB)
find_package(LibR REQUIRED)
list(APPEND LINK_LIBRARIES "${LIBR_CORE_LIBRARY}")
MESSAGE(STATUS "LIBR_CORE_LIBRARY " ${LIBR_CORE_LIBRARY})
include_directories(
"${LIBR_INCLUDE_DIRS}"
"${PROJECT_SOURCE_DIR}"
)
# Shared library target for the R package
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
target_link_libraries(xgboost ${LINK_LIBRARIES})
# R uses no lib prefix in shared library names of its packages
set_target_properties(xgboost PROPERTIES PREFIX "")
if(APPLE)
set_target_properties(xgboost PROPERTIES SUFFIX ".so")
endif (APPLE)
endif()
setup_rpackage_install_target(xgboost ${CMAKE_CURRENT_BINARY_DIR})
# use a dummy location for any other remaining installs
set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/dummy_inst")
endif (R_LIB)
# main targets: shared library & exe
else()
# Executable
add_executable(runxgboost $<TARGET_OBJECTS:objxgboost> src/cli_main.cc)
set_target_properties(runxgboost PROPERTIES
OUTPUT_NAME xgboost
)
set_output_directory(runxgboost ${PROJECT_SOURCE_DIR})
target_link_libraries(runxgboost ${LINK_LIBRARIES})
# Shared library
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
target_link_libraries(xgboost ${LINK_LIBRARIES})
set_output_directory(xgboost ${PROJECT_SOURCE_DIR}/lib)
if(MINGW)
# remove the 'lib' prefix to conform to windows convention for shared library names
set_target_properties(xgboost PROPERTIES PREFIX "")
endif (MINGW)
endif()
if (BUILD_C_DOC)
include(cmake/Doc.cmake)
run_doxygen()
endif (BUILD_C_DOC)
# Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
add_dependencies(xgboost runxgboost)
endif()
include(GNUInstallDirs)
# Install all headers. Please note that currently the C++ headers do not form an "API".
install(DIRECTORY ${xgboost_SOURCE_DIR}/include/xgboost
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
install(TARGETS xgboost runxgboost
EXPORT XGBoostTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
INCLUDES DESTINATION ${LIBLEGACY_INCLUDE_DIRS})
install(EXPORT XGBoostTargets
FILE XGBoostTargets.cmake
NAMESPACE xgboost::
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost)
# JVM
if(JVM_BINDINGS)
find_package(JNI QUIET REQUIRED)
include(CMakePackageConfigHelpers)
configure_package_config_file(
${CMAKE_CURRENT_LIST_DIR}/cmake/xgboost-config.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/cmake/xgboost-config.cmake
INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost)
write_basic_package_version_file(
${CMAKE_BINARY_DIR}/cmake/xgboost-config-version.cmake
VERSION ${XGBOOST_VERSION}
COMPATIBILITY AnyNewerVersion)
install(
FILES
${CMAKE_BINARY_DIR}/cmake/xgboost-config.cmake
${CMAKE_BINARY_DIR}/cmake/xgboost-config-version.cmake
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost)
include_directories(${JNI_INCLUDE_DIRS} jvm-packages/xgboost4j/src/native)
#-- Test
add_library(xgboost4j SHARED
$<TARGET_OBJECTS:objxgboost>
jvm-packages/xgboost4j/src/native/xgboost4j.cpp)
set_output_directory(xgboost4j ${PROJECT_SOURCE_DIR}/lib)
target_link_libraries(xgboost4j
${LINK_LIBRARIES}
${JAVA_JVM_LIBRARY})
endif()
# Test
if(GOOGLE_TEST)
enable_testing()
# Unittests.
add_subdirectory(${xgboost_SOURCE_DIR}/tests/cpp)
add_test(
NAME TestXGBoostLib
COMMAND testxgboost
WORKING_DIRECTORY ${xgboost_BINARY_DIR})
find_package(GTest REQUIRED)
# CLI tests
configure_file(
${xgboost_SOURCE_DIR}/tests/cli/machine.conf.in
${xgboost_BINARY_DIR}/tests/cli/machine.conf
@ONLY)
add_test(
NAME TestXGBoostCLI
COMMAND runxgboost ${xgboost_BINARY_DIR}/tests/cli/machine.conf
WORKING_DIRECTORY ${xgboost_BINARY_DIR})
set_tests_properties(TestXGBoostCLI
PROPERTIES
PASS_REGULAR_EXPRESSION ".*test-rmse:0.087.*")
endif (GOOGLE_TEST)
auto_source_group("${TEST_SOURCES}")
include_directories(${GTEST_INCLUDE_DIRS})
# For MSVC: Call msvc_use_static_runtime() once again to completely
# replace /MD with /MT. See https://github.com/dmlc/xgboost/issues/4462
# for issues caused by mixing of /MD and /MT flags
msvc_use_static_runtime()
if(USE_CUDA)
file(GLOB_RECURSE CUDA_TEST_SOURCES "tests/cpp/*.cu")
cuda_compile(CUDA_TEST_OBJS ${CUDA_TEST_SOURCES})
else()
set(CUDA_TEST_OBJS "")
endif()
add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_OBJS} $<TARGET_OBJECTS:objxgboost>)
set_output_directory(testxgboost ${PROJECT_SOURCE_DIR})
target_link_libraries(testxgboost ${GTEST_LIBRARIES} ${LINK_LIBRARIES})
add_test(TestXGBoost testxgboost)
endif()
# Group sources
auto_source_group("${SOURCES}")

CONTRIBUTORS.md

@@ -2,42 +2,34 @@ Contributors of DMLC/XGBoost
============================
XGBoost has been developed and used by a group of active community members. Everyone is more than welcome to contribute; contributing is a great way to make the project better and more accessible to more users.
Project Management Committee (PMC)
----------
The Project Management Committee (PMC) consists of a group of active committers who moderate discussions, manage project releases, and propose new committer/PMC members.
* [Tianqi Chen](https://github.com/tqchen), University of Washington
- Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
* [Michael Benesty](https://github.com/pommedeterresautee)
- Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
- Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
* [Nan Zhu](https://github.com/CodingCat), Uber
- Nan is a software engineer in Uber. He contributed mostly in JVM packages.
* [Jiaming Yuan](https://github.com/trivialfis)
- Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
* [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
- Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
* [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
- Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
* [Hongliang Liu](https://github.com/phunterlau)
Committers
----------
Committers are people who have made substantial contributions to the project and have been granted write access to the project.
* [Tianqi Chen](https://github.com/tqchen), University of Washington
- Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
* [Tong He](https://github.com/hetong007), Amazon AI
- Tong is an applied scientist in Amazon AI. He is the maintainer of XGBoost R package.
* [Vadim Khotilovich](https://github.com/khotilov)
- Vadim contributes many improvements in R and core packages.
* [Bing Xu](https://github.com/antinucleon)
- Bing is the original creator of XGBoost Python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
* [Michael Benesty](https://github.com/pommedeterresautee)
- Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
- Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
* [Nan Zhu](https://github.com/CodingCat), Uber
- Nan is a software engineer in Uber. He contributed mostly in JVM packages.
* [Sergei Lebedev](https://github.com/superbobry), Criteo
- Sergei is a software engineer in Criteo. He contributed mostly in JVM packages.
* [Hongliang Liu](https://github.com/phunterlau)
* [Scott Lundberg](http://scottlundberg.com/), University of Washington
- Scott is a Ph.D. student at University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package.
* [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
- Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
* [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
- Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
* [Jiaming](https://github.com/trivialfis)
- Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
Become a Committer
------------------
@@ -94,11 +86,3 @@ List of Contributors
- Andrew Thia implemented feature interaction constraints
* [Wei Tian](https://github.com/weitian)
* [Chen Qin](https://github.com/chenqin)
* [Sam Wilkinson](https://samwilkinson.io)
* [Matthew Jones](https://github.com/mt-jones)
* [Jiaxiang Li](https://github.com/JiaxiangBU)
* [Bryan Woods](https://github.com/bryan-woods)
- Bryan added support for cross-validation for the ranking objective
* [Haoda Fu](https://github.com/fuhaoda)
* [Evan Kepner](https://github.com/EvanKepner)
- Evan Kepner added support for os.PathLike file paths in Python

Jenkinsfile vendored (424 changed lines)

@@ -3,406 +3,106 @@
// Jenkins pipeline
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// Command to run command inside a docker container
dockerRun = 'tests/ci_build/ci_build.sh'
import groovy.transform.Field
/* Unrestricted tasks: tasks that do NOT generate artifacts */
// Command to run command inside a docker container
def dockerRun = 'tests/ci_build/ci_build.sh'
// Utility functions
@Field
def commit_id // necessary to pass a variable from one stage to another
def utils
def buildMatrix = [
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2", "multiGpu": true],
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2" ],
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": false, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
]
pipeline {
// Each stage specify its own agent
agent none
environment {
DOCKER_CACHE_ECR_ID = '492475357299'
DOCKER_CACHE_ECR_REGION = 'us-west-2'
}
// Setup common job properties
options {
ansiColor('xterm')
timestamps()
timeout(time: 240, unit: 'MINUTES')
timeout(time: 120, unit: 'MINUTES')
buildDiscarder(logRotator(numToKeepStr: '10'))
preserveStashes()
}
// Build stages
stages {
stage('Jenkins Linux: Get sources') {
agent { label 'linux && cpu' }
stage('Jenkins: Get sources') {
agent {
label 'unrestricted'
}
steps {
script {
checkoutSrcs()
commit_id = "${GIT_COMMIT}"
utils = load('tests/ci_build/jenkins_tools.Groovy')
utils.checkoutSrcs()
}
stash name: 'srcs'
milestone ordinal: 1
stash name: 'srcs', excludes: '.git/'
milestone label: 'Sources ready', ordinal: 1
}
}
stage('Jenkins Linux: Formatting Check') {
agent none
stage('Jenkins: Build & Test') {
steps {
script {
parallel ([
'clang-tidy': { ClangTidy() },
'lint': { Lint() },
'sphinx-doc': { SphinxDoc() },
'doxygen': { Doxygen() }
])
parallel (buildMatrix.findAll{it['enabled']}.collectEntries{ c ->
def buildName = utils.getBuildName(c)
utils.buildFactory(buildName, c, false, this.&buildPlatformCmake)
})
}
milestone ordinal: 2
}
}
stage('Jenkins Linux: Build') {
agent none
steps {
script {
parallel ([
'build-cpu': { BuildCPU() },
'build-cpu-rabit-mock': { BuildCPUMock() },
'build-gpu-cuda9.0': { BuildCUDA(cuda_version: '9.0') },
'build-gpu-cuda10.0': { BuildCUDA(cuda_version: '10.0') },
'build-gpu-cuda10.1': { BuildCUDA(cuda_version: '10.1') },
'build-jvm-packages': { BuildJVMPackages(spark_version: '2.4.3') },
'build-jvm-doc': { BuildJVMDoc() }
])
}
milestone ordinal: 3
}
}
stage('Jenkins Linux: Test') {
agent none
steps {
script {
parallel ([
'test-python-cpu': { TestPythonCPU() },
'test-python-gpu-cuda9.0': { TestPythonGPU(cuda_version: '9.0') },
'test-python-gpu-cuda10.0': { TestPythonGPU(cuda_version: '10.0') },
'test-python-gpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1') },
'test-python-mgpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1', multi_gpu: true) },
'test-cpp-gpu': { TestCppGPU(cuda_version: '10.1') },
'test-cpp-mgpu': { TestCppGPU(cuda_version: '10.1', multi_gpu: true) },
'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '2.4.3') },
'test-jvm-jdk11': { CrossTestJVMwithJDK(jdk_version: '11') },
'test-jvm-jdk12': { CrossTestJVMwithJDK(jdk_version: '12') },
'test-r-3.4.4': { TestR(use_r35: false) },
'test-r-3.5.3': { TestR(use_r35: true) }
])
}
milestone ordinal: 4
}
}
stage('Jenkins Linux: Deploy') {
agent none
steps {
script {
parallel ([
'deploy-jvm-packages': { DeployJVMPackages(spark_version: '2.4.3') }
])
}
milestone ordinal: 5
}
}
}
}
// check out source code from git
def checkoutSrcs() {
retry(5) {
try {
timeout(time: 2, unit: 'MINUTES') {
checkout scm
sh 'git submodule update --init'
/**
* Build platform and test it via cmake.
*/
def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
def opts = utils.cmakeOptions(conf)
// Destination dir for artifacts
def distDir = "dist/${buildName}"
def dockerArgs = ""
if (conf["withGpu"]) {
dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
}
} catch (exc) {
deleteDir()
error "Failed to fetch source codes"
}
}
}
def ClangTidy() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Running clang-tidy job..."
def container_type = "clang_tidy"
def docker_binary = "docker"
def dockerArgs = "--build-arg CUDA_VERSION=9.2"
sh """
${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} python3 tests/ci_build/tidy.py
"""
deleteDir()
}
}
def Lint() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Running lint..."
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} make lint
"""
deleteDir()
}
}
def SphinxDoc() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Running sphinx-doc..."
def container_type = "cpu"
def docker_binary = "docker"
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e SPHINX_GIT_BRANCH=${BRANCH_NAME}'"
sh """#!/bin/bash
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} make -C doc html
"""
deleteDir()
}
}
def Doxygen() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Running doxygen..."
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/doxygen.sh ${BRANCH_NAME}
"""
echo 'Uploading doc...'
s3Upload file: "build/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "doxygen/${BRANCH_NAME}.tar.bz2"
deleteDir()
}
}
def BuildCPU() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Build CPU"
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh
${dockerRun} ${container_type} ${docker_binary} build/testxgboost
"""
// Sanitizer test
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer -e ASAN_OPTIONS=symbolize=1 -e UBSAN_OPTIONS=print_stacktrace=1:log_path=ubsan_error.log --cap-add SYS_PTRACE'"
def docker_args = "--build-arg CMAKE_VERSION=3.12"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \
-DCMAKE_BUILD_TYPE=Debug -DSANITIZER_PATH=/usr/lib/x86_64-linux-gnu/
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
"""
deleteDir()
}
}
def BuildCPUMock() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Build CPU with rabit mock"
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_mock_cmake.sh
"""
echo 'Stashing rabit C++ test executable (xgboost)...'
stash name: 'xgboost_rabit_tests', includes: 'xgboost'
deleteDir()
}
}
def BuildCUDA(args) {
node('linux && cpu') {
unstash name: 'srcs'
echo "Build with CUDA ${args.cuda_version}"
def container_type = "gpu_build"
def docker_binary = "docker"
def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON
${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux1_x86_64
"""
// Stash wheel for CUDA 9.0 target
if (args.cuda_version == '9.0') {
echo 'Stashing Python wheel...'
stash name: 'xgboost_whl_cuda9', includes: 'python-package/dist/*.whl'
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
echo 'Stashing C++ test executable (testxgboost)...'
stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost'
}
deleteDir()
}
}
def BuildJVMPackages(args) {
node('linux && cpu') {
unstash name: 'srcs'
echo "Build XGBoost4J-Spark with Spark ${args.spark_version}"
def container_type = "jvm"
def docker_binary = "docker"
// Use only 4 CPU cores
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='--cpuset-cpus 0-3'"
sh """
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_packages.sh ${args.spark_version}
"""
echo 'Stashing XGBoost4J JAR...'
stash name: 'xgboost4j_jar', includes: 'jvm-packages/xgboost4j/target/*.jar,jvm-packages/xgboost4j-spark/target/*.jar,jvm-packages/xgboost4j-example/target/*.jar'
deleteDir()
}
}
def BuildJVMDoc() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Building JVM doc..."
def container_type = "jvm"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_doc.sh ${BRANCH_NAME}
"""
echo 'Uploading doc...'
s3Upload file: "jvm-packages/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${BRANCH_NAME}.tar.bz2"
deleteDir()
}
}
def TestPythonCPU() {
node('linux && cpu') {
unstash name: 'xgboost_whl_cuda9'
unstash name: 'srcs'
echo "Test Python CPU"
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu-py35
"""
deleteDir()
}
}
def TestPythonGPU(args) {
nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
def test_suite = conf["withGpu"] ? (conf["multiGpu"] ? "mgpu" : "gpu") : "cpu"
// Build node - this is returned result
retry(3) {
node(nodeReq) {
unstash name: 'xgboost_whl_cuda9'
unstash name: 'srcs'
echo "Test Python GPU: CUDA ${args.cuda_version}"
def container_type = "gpu"
def docker_binary = "nvidia-docker"
def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
if (args.multi_gpu) {
echo "Using multiple GPUs"
echo """
|===== XGBoost CMake build =====
| dockerTarget: ${dockerTarget}
| cmakeOpts : ${opts}
|=========================
""".stripMargin('|')
// Invoke command inside docker
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/test_${test_suite}.sh
"""
} else {
echo "Using a single GPU"
if (!conf["multiGpu"]) {
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh gpu
${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
cp xgboost "${distDir}"
cp -r python-package/dist "${distDir}/py"
# Test the wheel for compatibility on a barebones CPU container
${dockerRun} release ${dockerArgs} bash -c " \
pip install --user python-package/dist/xgboost-*-none-any.whl && \
python -m nose -v tests/python"
# Test the wheel for compatibility on CUDA 10.0 container
${dockerRun} gpu --build-arg CUDA_VERSION=10.0 bash -c " \
pip install --user python-package/dist/xgboost-*-none-any.whl && \
python -m nose -v --eval-attr='(not slow) and (not mgpu)' tests/python-gpu"
"""
}
// For CUDA 10.0 target, run cuDF tests too
if (args.cuda_version == '10.0') {
echo "Running tests with cuDF..."
sh """
${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh cudf
"""
}
deleteDir()
}
}
def TestCppRabit() {
node(nodeReq) {
unstash name: 'xgboost_rabit_tests'
unstash name: 'srcs'
echo "Test C++, rabit mock on"
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/runxgb.sh xgboost tests/ci_build/approx.conf.in
"""
deleteDir()
}
}
def TestCppGPU(args) {
nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
node(nodeReq) {
unstash name: 'xgboost_cpp_tests'
unstash name: 'srcs'
echo "Test C++, CUDA ${args.cuda_version}"
def container_type = "gpu"
def docker_binary = "nvidia-docker"
def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
if (args.multi_gpu) {
echo "Using multiple GPUs"
sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=*.MGPU_*"
} else {
echo "Using a single GPU"
sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=-*.MGPU_*"
}
deleteDir()
}
}
def CrossTestJVMwithJDK(args) {
node('linux && cpu') {
unstash name: 'xgboost4j_jar'
unstash name: 'srcs'
if (args.spark_version != null) {
echo "Test XGBoost4J on a machine with JDK ${args.jdk_version}, Spark ${args.spark_version}"
} else {
echo "Test XGBoost4J on a machine with JDK ${args.jdk_version}"
}
def container_type = "jvm_cross"
def docker_binary = "docker"
def spark_arg = (args.spark_version != null) ? "--build-arg SPARK_VERSION=${args.spark_version}" : ""
def docker_args = "--build-arg JDK_VERSION=${args.jdk_version} ${spark_arg}"
// Run integration tests only when spark_version is given
def docker_extra_params = (args.spark_version != null) ? "CI_DOCKER_EXTRA_PARAMS_INIT='-e RUN_INTEGRATION_TEST=1'" : ""
sh """
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_jvm_cross.sh
"""
deleteDir()
}
}
def TestR(args) {
node('linux && cpu') {
unstash name: 'srcs'
echo "Test R package"
def container_type = "rproject"
def docker_binary = "docker"
def use_r35_flag = (args.use_r35) ? "1" : "0"
def docker_args = "--build-arg USE_R35=${use_r35_flag}"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_test_rpkg.sh || tests/ci_build/print_r_stacktrace.sh
"""
deleteDir()
}
}
def DeployJVMPackages(args) {
node('linux && cpu') {
unstash name: 'srcs'
if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
echo 'Deploying to xgboost-maven-repo S3 repo...'
def container_type = "jvm"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/deploy_jvm_packages.sh ${args.spark_version}
"""
}
deleteDir()
}
}

Jenkinsfile-restricted (new file, 123 lines)

@@ -0,0 +1,123 @@
#!/usr/bin/groovy
// -*- mode: groovy -*-
// Jenkins pipeline
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
import groovy.transform.Field
/* Restricted tasks: tasks generating artifacts, such as binary wheels and
documentation */
// Command to run command inside a docker container
def dockerRun = 'tests/ci_build/ci_build.sh'
// Utility functions
@Field
def utils
@Field
def commit_id
@Field
def branch_name
def buildMatrix = [
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2" ],
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": false, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
]
pipeline {
// Each stage specify its own agent
agent none
// Setup common job properties
options {
ansiColor('xterm')
timestamps()
timeout(time: 120, unit: 'MINUTES')
buildDiscarder(logRotator(numToKeepStr: '10'))
}
// Build stages
stages {
stage('Jenkins: Get sources') {
agent {
label 'restricted'
}
steps {
script {
utils = load('tests/ci_build/jenkins_tools.Groovy')
utils.checkoutSrcs()
commit_id = "${GIT_COMMIT}"
branch_name = "${GIT_LOCAL_BRANCH}"
}
stash name: 'srcs', excludes: '.git/'
milestone label: 'Sources ready', ordinal: 1
}
}
stage('Jenkins: Build doc') {
steps {
script {
retry(3) {
node('linux && cpu && restricted') {
unstash name: 'srcs'
echo 'Building doc...'
dir ('jvm-packages') {
sh "bash ./build_doc.sh ${commit_id}"
archiveArtifacts artifacts: "${commit_id}.tar.bz2", allowEmptyArchive: true
echo 'Deploying doc...'
withAWS(credentials:'xgboost-doc-bucket') {
s3Upload file: "${commit_id}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${branch_name}.tar.bz2"
}
}
}
}
}
}
}
stage('Jenkins: Build artifacts') {
steps {
script {
parallel (buildMatrix.findAll{it['enabled']}.collectEntries{ c ->
def buildName = utils.getBuildName(c)
utils.buildFactory(buildName, c, true, this.&buildPlatformCmake)
})
}
}
}
}
}
/**
* Build platform and test it via cmake.
*/
def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
def opts = utils.cmakeOptions(conf)
// Destination dir for artifacts
def distDir = "dist/${buildName}"
def dockerArgs = ""
if(conf["withGpu"]){
dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
}
// Build node - this is returned result
retry(3) {
node(nodeReq) {
unstash name: 'srcs'
echo """
|===== XGBoost CMake build =====
| dockerTarget: ${dockerTarget}
| cmakeOpts : ${opts}
|=========================
""".stripMargin('|')
// Invoke command inside docker
sh """
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
cp xgboost "${distDir}"
cp -r lib "${distDir}"
cp -r python-package/dist "${distDir}/py"
"""
archiveArtifacts artifacts: "${distDir}/**/*.*", allowEmptyArchive: true
}
}
}

Jenkinsfile-win64

@@ -1,141 +0,0 @@
#!/usr/bin/groovy
// -*- mode: groovy -*-
/* Jenkins pipeline for Windows AMD64 target */
import groovy.transform.Field
@Field
def commit_id // necessary to pass a variable from one stage to another
pipeline {
agent none
// Build stages
stages {
stage('Jenkins Win64: Get sources') {
agent { label 'win64 && build' }
steps {
script {
checkoutSrcs()
commit_id = "${GIT_COMMIT}"
}
stash name: 'srcs'
milestone ordinal: 1
}
}
stage('Jenkins Win64: Build') {
agent none
steps {
script {
parallel ([
'build-win64-cuda9.0': { BuildWin64() }
])
}
milestone ordinal: 2
}
}
stage('Jenkins Win64: Test') {
agent none
steps {
script {
parallel ([
'test-win64-cpu': { TestWin64CPU() },
'test-win64-gpu-cuda9.0': { TestWin64GPU(cuda_target: 'cuda9') },
'test-win64-gpu-cuda10.0': { TestWin64GPU(cuda_target: 'cuda10_0') },
'test-win64-gpu-cuda10.1': { TestWin64GPU(cuda_target: 'cuda10_1') }
])
}
milestone ordinal: 3
}
}
}
}
// check out source code from git
def checkoutSrcs() {
retry(5) {
try {
timeout(time: 2, unit: 'MINUTES') {
checkout scm
sh 'git submodule update --init'
}
} catch (exc) {
deleteDir()
error "Failed to fetch source codes"
}
}
}
def BuildWin64() {
node('win64 && build') {
unstash name: 'srcs'
echo "Building XGBoost for Windows AMD64 target..."
bat "nvcc --version"
bat """
mkdir build
cd build
cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON
"""
bat """
cd build
"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\MSBuild\\15.0\\Bin\\MSBuild.exe" xgboost.sln /m /p:Configuration=Release /nodeReuse:false
"""
bat """
cd python-package
conda activate && python setup.py bdist_wheel --universal && for /R %%i in (dist\\*.whl) DO python ../tests/ci_build/rename_whl.py "%%i" ${commit_id} win_amd64
"""
echo "Insert vcomp140.dll (OpenMP runtime) into the wheel..."
bat """
cd python-package\\dist
COPY /B ..\\..\\tests\\ci_build\\insert_vcomp140.py
conda activate && python insert_vcomp140.py *.whl
"""
echo 'Stashing Python wheel...'
stash name: 'xgboost_whl', includes: 'python-package/dist/*.whl'
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
echo 'Stashing C++ test executable (testxgboost)...'
stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost.exe'
deleteDir()
}
}
def TestWin64CPU() {
node('win64 && cpu') {
unstash name: 'srcs'
unstash name: 'xgboost_whl'
echo "Test Win64 CPU"
echo "Installing Python wheel..."
bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
bat """
conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
"""
echo "Running Python tests..."
bat "conda activate && python -m pytest -v -s --fulltrace tests\\python"
bat "conda activate && python -m pip uninstall -y xgboost"
deleteDir()
}
}
def TestWin64GPU(args) {
node("win64 && gpu && ${args.cuda_target}") {
unstash name: 'srcs'
unstash name: 'xgboost_whl'
unstash name: 'xgboost_cpp_tests'
echo "Test Win64 GPU (${args.cuda_target})"
bat "nvcc --version"
echo "Running C++ tests..."
bat "build\\testxgboost.exe"
echo "Installing Python wheel..."
bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
bat """
conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
"""
echo "Running Python tests..."
bat """
conda activate && python -m pytest -v -s --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu
"""
bat "conda activate && python -m pip uninstall -y xgboost"
deleteDir()
}
}

LICENSE

@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright (c) 2019 by Contributors
Copyright (c) 2018 by Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

Makefile

@@ -42,6 +42,11 @@ ifeq ($(USE_OPENMP), 0)
endif
include $(DMLC_CORE)/make/dmlc.mk
# include the plugins
ifdef XGB_PLUGINS
include $(XGB_PLUGINS)
endif
# set compiler defaults for OSX versus *nix
# let people override either
OS := $(shell uname)
@@ -62,8 +67,8 @@ export CXX = g++
endif
endif
export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS)
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS) $(PLUGIN_LDFLAGS)
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS)
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include
#java include path
export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java
@@ -125,7 +130,7 @@ $(RABIT)/lib/$(LIB_RABIT): $(wildcard $(RABIT)/src/*.cc)
jvm: jvm-packages/lib/libxgboost4j.so
SRC = $(wildcard src/*.cc src/*/*.cc)
ALL_OBJ = $(patsubst src/%.cc, build/%.o, $(SRC))
ALL_OBJ = $(patsubst src/%.cc, build/%.o, $(SRC)) $(PLUGIN_OBJS)
AMALGA_OBJ = amalgamation/xgboost-all0.o
LIB_DEP = $(DMLC_CORE)/libdmlc.a $(RABIT)/lib/$(LIB_RABIT)
ALL_DEP = $(filter-out build/cli_main.o, $(ALL_OBJ)) $(LIB_DEP)
@@ -137,6 +142,11 @@ build/%.o: src/%.cc
$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
$(CXX) -c $(CFLAGS) $< -o $@
build_plugin/%.o: plugin/%.cc
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -MM -MT build_plugin/$*.o $< >build_plugin/$*.d
$(CXX) -c $(CFLAGS) $< -o $@
# This should be equivalent to $(ALL_OBJ) except for build/cli_main.o
amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc
$(CXX) -c $(CFLAGS) $< -o $@
@@ -163,14 +173,10 @@ xgboost: $(CLI_OBJ) $(ALL_DEP)
$(CXX) $(CFLAGS) -o $@ $(filter %.o %.a, $^) $(LDFLAGS)
rcpplint:
python3 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src
python2 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src
lint: rcpplint
python3 dmlc-core/scripts/lint.py --exclude_path python-package/xgboost/dmlc-core \
python-package/xgboost/include python-package/xgboost/lib \
python-package/xgboost/make python-package/xgboost/rabit \
python-package/xgboost/src --pylint-rc ${PWD}/python-package/.pylintrc xgboost \
${LINT_LANG} include src python-package
python2 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} include src plugin python-package
pylint:
flake8 --ignore E501 python-package
@@ -190,7 +196,7 @@ cover: check
endif
clean:
$(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
$(RM) -rf build build_plugin lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
$(RM) -rf build_tests *.gcov tests/cpp/xgboost_test
if [ -d "R-package/src" ]; then \
cd R-package/src; \
@@ -221,9 +227,7 @@ pippack: clean_all
rm -rf python-package/xgboost/rabit
rm -rf python-package/xgboost/src
cp -r python-package xgboost-python
cp -r CMakeLists.txt xgboost-python/xgboost/
cp -r cmake xgboost-python/xgboost/
cp -r plugin xgboost-python/xgboost/
cp -r Makefile xgboost-python/xgboost/
cp -r make xgboost-python/xgboost/
cp -r src xgboost-python/xgboost/
cp -r tests xgboost-python/xgboost/
@@ -254,17 +258,9 @@ Rpack: clean_all
cp -r dmlc-core/include xgboost/src/dmlc-core/include
cp -r dmlc-core/src xgboost/src/dmlc-core/src
cp ./LICENSE xgboost
# Modify PKGROOT in Makevars.in
cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.in
# Configure Makevars.win (Windows-specific Makevars, likely using MinGW)
cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' | sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.in
cp xgboost/src/Makevars.in xgboost/src/Makevars.win
cat xgboost/src/Makevars.in| sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.win
sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CXXFLAGS\)/g' xgboost/src/Makevars.win
sed -i -e 's/-pthread/$$\(SHLIB_PTHREAD_FLAGS\)/g' xgboost/src/Makevars.win
sed -i -e 's/@ENDIAN_FLAG@/-DDMLC_CMAKE_LITTLE_ENDIAN=1/g' xgboost/src/Makevars.win
sed -i -e 's/@BACKTRACE_LIB@//g' xgboost/src/Makevars.win
sed -i -e 's/@OPENMP_LIB@//g' xgboost/src/Makevars.win
rm -f xgboost/src/Makevars.win-e # OSX sed creates this extra file; remove it
sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CFLAGS\)/g' xgboost/src/Makevars.win
bash R-package/remove_warning_suppression_pragma.sh
rm xgboost/remove_warning_suppression_pragma.sh
@@ -277,3 +273,4 @@ Rcheck: Rbuild
-include build/*.d
-include build/*/*.d
-include build_plugin/*/*.d

NEWS.md (297 changed lines)

@@ -3,301 +3,6 @@ XGBoost Change Log
This file records the changes in the xgboost library in reverse chronological order.
## v0.90 (2019.05.18)
### XGBoost Python package drops Python 2.x (#4379, #4381)
Python 2.x is reaching its end-of-life at the end of this year. [Many scientific Python packages are now moving to drop Python 2.x](https://python3statement.org/).
### XGBoost4J-Spark now requires Spark 2.4.x (#4377)
* Spark 2.3 is reaching its end-of-life soon. See discussion at #4389.
* **Consistent handling of missing values** (#4309, #4349, #4411): Many users had reported issue with inconsistent predictions between XGBoost4J-Spark and the Python XGBoost package. The issue was caused by Spark mis-handling non-zero missing values (NaN, -1, 999 etc). We now alert the user whenever Spark doesn't handle missing values correctly (#4309, #4349). See [the tutorial for dealing with missing values in XGBoost4J-Spark](https://xgboost.readthedocs.io/en/release_0.90/jvm/xgboost4j_spark_tutorial.html#dealing-with-missing-values). This fix also depends on the availability of Spark 2.4.x.
### Roadmap: better performance scaling for multi-core CPUs (#4310)
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #4310 optimizes quantile sketches and other pre-processing tasks. Special thanks to @SmirnovEgorRu.
### Roadmap: Harden distributed training (#4250)
* Make distributed training in XGBoost more robust by hardening [Rabit](https://github.com/dmlc/rabit), which implements [the AllReduce primitive](https://en.wikipedia.org/wiki/Reduce_%28parallel_pattern%29). In particular, improve test coverage on mechanisms for fault tolerance and recovery. Special thanks to @chenqin.
### New feature: Multi-class metric functions for GPUs (#4368)
* Metrics for multi-class classification have been ported to GPU: `merror`, `mlogloss`. Special thanks to @trivialfis.
* With supported metrics, XGBoost will select the correct devices based on your system and `n_gpus` parameter.
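The snippet below is a minimal sketch of training with the GPU-backed multi-class metrics (it assumes a CUDA-enabled XGBoost build; the toy data is only a stand-in):
```python
import numpy as np
import xgboost as xgb

# Toy 3-class data; replace with a real dataset.
rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.randn(500, 10), label=rng.randint(0, 3, 500))
dvalid = xgb.DMatrix(rng.randn(100, 10), label=rng.randint(0, 3, 100))

# Use a list of tuples so that eval_metric can be given twice.
params = [
    ("objective", "multi:softprob"),
    ("num_class", 3),
    ("tree_method", "gpu_hist"),
    ("eval_metric", "merror"),    # multi-class error, now computed on the GPU
    ("eval_metric", "mlogloss"),  # multi-class log loss, now computed on the GPU
]
bst = xgb.train(params, dtrain, num_boost_round=20, evals=[(dvalid, "validation")])
```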
### New feature: Scikit-learn-like random forest API (#4148, #4255, #4258)
* XGBoost Python package now offers `XGBRFClassifier` and `XGBRFRegressor` API to train random forests. See [the tutorial](https://xgboost.readthedocs.io/en/release_0.90/tutorials/rf.html). Special thanks to @canonizer
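For illustration, a minimal sketch of the scikit-learn-style random forest interface (the breast-cancer dataset is just a stand-in):
```python
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
import xgboost as xgb

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# XGBRFClassifier fits a random forest: many parallel trees, a single boosting round.
rf = xgb.XGBRFClassifier(n_estimators=100, subsample=0.8, colsample_bynode=0.8)
rf.fit(X_train, y_train)
print("accuracy:", rf.score(X_test, y_test))
```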
### New feature: use external memory in GPU predictor (#4284, #4396, #4438, #4457)
* It is now possible to make predictions on GPU when the input is read from external memory. This is useful when you want to make predictions with a big dataset that does not fit into GPU memory. Special thanks to @rongou, @canonizer, @sriramch.
```python
dtest = xgboost.DMatrix('test_data.libsvm#dtest.cache')
bst.set_param('predictor', 'gpu_predictor')
bst.predict(dtest)
```
* Coming soon: GPU training (`gpu_hist`) with external memory
### New feature: XGBoost can now handle comments in LIBSVM files (#4430)
* Special thanks to @trivialfis and @hcho3
### New feature: Embed XGBoost in your C/C++ applications using CMake (#4323, #4333, #4453)
* It is now easier than ever to embed XGBoost in your C/C++ applications. In your CMakeLists.txt, add `xgboost::xgboost` as a linked library:
```cmake
find_package(xgboost REQUIRED)
add_executable(api-demo c-api-demo.c)
target_link_libraries(api-demo xgboost::xgboost)
```
[XGBoost C API documentation is available.](https://xgboost.readthedocs.io/en/release_0.90/dev) Special thanks to @trivialfis
### Performance improvements
* Use feature interaction constraints to narrow split search space (#4341, #4428)
* Additional optimizations for `gpu_hist` (#4248, #4283)
* Reduce OpenMP thread launches in `gpu_hist` (#4343)
* Additional optimizations for multi-node multi-GPU random forests. (#4238)
* Allocate unique prediction buffer for each input matrix, to avoid re-sizing GPU array (#4275)
* Remove various synchronisations from CUDA API calls (#4205)
* XGBoost4J-Spark
- Allow the user to control whether to cache partitioned training data, to potentially reduce execution time (#4268)
### Bug-fixes
* Fix node reuse in `hist` (#4404)
* Fix GPU histogram allocation (#4347)
* Fix matrix attributes not sliced (#4311)
* Revise AUC and AUCPR metrics so they now work with the weighted ranking task (#4216, #4436)
* Fix timer invocation for InitDataOnce() in `gpu_hist` (#4206)
* Fix R-devel errors (#4251)
* Make gradient update in GPU linear updater thread-safe (#4259)
* Prevent out-of-range access in column matrix (#4231)
* Don't store DMatrix handle in Python object until it's initialized, to improve exception safety (#4317)
* XGBoost4J-Spark
- Fix non-deterministic order within a zipped partition on prediction (#4388)
- Remove race condition on tracker shutdown (#4224)
- Allow setting the parameter `maxLeaves`. (#4226)
- Allow partial evaluation of dataframe before prediction (#4407)
- Automatically set `maximize_evaluation_metrics` if not explicitly given (#4446)
### API changes
* Deprecate `reg:linear` in favor of `reg:squarederror`. (#4267, #4427)
* Add attribute getter and setter to the Booster object in XGBoost4J (#4336)
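For the `reg:linear` deprecation above, a small sketch of the rename (model behaviour is unchanged; only the objective alias differs):
```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.randn(200, 5), label=rng.randn(200))

# Before: params = {"objective": "reg:linear"}   # now deprecated, emits a warning
params = {"objective": "reg:squarederror"}        # new canonical name
bst = xgb.train(params, dtrain, num_boost_round=10)
```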
### Maintenance: Refactor C++ code for legibility and maintainability
* Fix clang-tidy warnings. (#4149)
* Remove deprecated C APIs. (#4266)
* Use Monitor class to time functions in `hist`. (#4273)
* Retire DVec class in favour of c++20 style span for device memory. (#4293)
* Improve HostDeviceVector exception safety (#4301)
### Maintenance: testing, continuous integration, build system
* **Major refactor of CMakeLists.txt** (#4323, #4333, #4453): adopt modern CMake and export XGBoost as a target
* **Major improvement in Jenkins CI pipeline** (#4234)
- Migrate all Linux tests to Jenkins (#4401)
- Builds and tests are now de-coupled, to test an artifact against multiple versions of CUDA, JDK, and other dependencies (#4401)
- Add Windows GPU to Jenkins CI pipeline (#4463, #4469)
* Support CUDA 10.1 (#4223, #4232, #4265, #4468)
* Python wheels are now built with CUDA 9.0, so that JIT is not required on Volta architecture (#4459)
* Integrate with NVTX CUDA profiler (#4205)
* Add a test for cpu predictor using external memory (#4308)
* Refactor tests to get rid of duplication (#4358)
* Remove test dependency on `craigcitro/r-travis`, since it's deprecated (#4353)
* Add files from local R build to `.gitignore` (#4346)
* Make XGBoost4J compatible with Java 9+ by revising NativeLibLoader (#4351)
* Jenkins build for CUDA 10.0 (#4281)
* Remove remaining `silent` and `debug_verbose` in Python tests (#4299)
* Use all cores to build XGBoost4J lib on linux (#4304)
* Upgrade Jenkins Linux build environment to GCC 5.3.1, CMake 3.6.0 (#4306)
* Make CMakeLists.txt compatible with CMake 3.3 (#4420)
* Add OpenMP option in CMakeLists.txt (#4339)
* Get rid of a few trivial compiler warnings (#4312)
* Add external Docker build cache, to speed up builds on Jenkins CI (#4331, #4334, #4458)
* Fix Windows tests (#4403)
* Fix a broken python test (#4395)
* Use a fixed seed to split data in XGBoost4J-Spark tests, for reproducibility (#4417)
* Add additional Python tests to test training under constraints (#4426)
* Enable building with shared NCCL. (#4447)
### Usability Improvements, Documentation
* Document limitation of one-split-at-a-time Greedy tree learning heuristic (#4233)
* Update build doc: PyPI wheel now support multi-GPU (#4219)
* Fix docs for `num_parallel_tree` (#4221)
* Fix document about `colsample_by*` parameter (#4340)
* Make the train and test inputs use the same column names. (#4329)
* Update R contribute link. (#4236)
* Fix travis R tests (#4277)
* Log version number in crash log in XGBoost4J-Spark (#4271, #4303)
* Allow suppression of Rabit output in Booster::train in XGBoost4J (#4262)
* Add tutorial on handling missing values in XGBoost4J-Spark (#4425)
* Fix typos (#4345, #4393, #4432, #4435)
* Added language classifier in setup.py (#4327)
* Added Travis CI badge (#4344)
* Add BentoML to use case section (#4400)
* Remove subtly sexist remark (#4418)
* Add R vignette about parsing JSON dumps (#4439)
### Acknowledgement
**Contributors**: Nan Zhu (@CodingCat), Adam Pocock (@Craigacp), Daniel Hen (@Daniel8hen), Jiaxiang Li (@JiaxiangBU), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Andy Adinets (@canonizer), Jonas (@elcombato), Harry Braviner (@harrybraviner), Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), James Lamb (@jameslamb), Jean-Francois Zinque (@jeffzi), Yang Yang (@jokerkeny), Mayank Suman (@mayanksuman), jess (@monkeywithacupcake), Hajime Morrita (@omo), Ravi Kalia (@project-delphi), @ras44, Rong Ou (@rongou), Shaochen Shi (@shishaochen), Xu Xiao (@sperlingxx), @sriramch, Jiaming Yuan (@trivialfis), Christopher Suchanek (@wsuchy), Bozhao (@yubozhao)
**Reviewers**: Nan Zhu (@CodingCat), Adam Pocock (@Craigacp), Daniel Hen (@Daniel8hen), Jiaxiang Li (@JiaxiangBU), Laurae (@Laurae2), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), @alois-bissuel, Andy Adinets (@canonizer), Chen Qin (@chenqin), Harry Braviner (@harrybraviner), Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), @jakirkham, James Lamb (@jameslamb), Julien Schueller (@jschueller), Mayank Suman (@mayanksuman), Hajime Morrita (@omo), Rong Ou (@rongou), Sara Robinson (@sararob), Shaochen Shi (@shishaochen), Xu Xiao (@sperlingxx), @sriramch, Sean Owen (@srowen), Sergei Lebedev (@superbobry), Yuan (Terry) Tang (@terrytangyuan), Theodore Vasiloudis (@thvasilo), Matthew Tovbin (@tovbinm), Jiaming Yuan (@trivialfis), Xin Yin (@xydrolase)
## v0.82 (2019.03.03)
This release is packed with many new features and bug fixes.
### Roadmap: better performance scaling for multi-core CPUs (#3957)
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #3957 marks an important step toward better scaling, using software prefetching and replacing STL vectors with C-style arrays. Special thanks to @Laurae2 and @SmirnovEgorRu.
* See #3810 for the latest progress on this roadmap.
### New feature: Distributed Fast Histogram Algorithm (`hist`) (#4011, #4102, #4140, #4128)
* It is now possible to run the `hist` algorithm in distributed setting. Special thanks to @CodingCat. The benefits include:
1. Faster local computation via feature binning
2. Support for monotonic constraints and feature interaction constraints
3. Simpler codebase than `approx`, allowing for future improvement
* Depth-wise tree growing is now performed in a separate code path, so that cross-node synchronization happens only once per level.
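As a quick illustration of the parameter surface, here is a single-node sketch (the distributed setting additionally needs a Rabit tracker, omitted here; the dataset is synthetic):

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(300, 3)
y = np.random.rand(300)
dtrain = xgb.DMatrix(X, label=y)
params = {
    'tree_method': 'hist',                       # fast histogram algorithm
    'grow_policy': 'depthwise',                  # level-by-level growth, one sync per level
    'monotone_constraints': '(1,-1,0)',          # increasing, decreasing, unconstrained
    'interaction_constraints': '[[0, 1], [2]]',  # features 0 and 1 may interact; 2 stays alone
}
bst = xgb.train(params, dtrain, num_boost_round=10)
```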
### New feature: Multi-Node, Multi-GPU training (#4095)
* Distributed training is now able to utilize clusters equipped with NVIDIA GPUs. In particular, the rabit AllReduce layer will communicate GPU device information. Special thanks to @mt-jones, @RAMitchell, @rongou, @trivialfis, @canonizer, and @jeffdk.
* Resource management systems will be able to assign a rank to each GPU in the cluster.
* In Dask, users will be able to construct a collection of XGBoost processes over an inhomogeneous device cluster (i.e. workers with different numbers and/or kinds of GPUs).
### New feature: Multiple validation datasets in XGBoost4J-Spark (#3904, #3910)
* You can now track the performance of the model during training with multiple evaluation datasets. By specifying `eval_sets` or calling `setEvalSets` on an `XGBoostClassifier` or `XGBoostRegressor`, you can pass in multiple evaluation datasets typed as a `Map` from `String` to `DataFrame`. Special thanks to @CodingCat.
* See the usage of multiple validation datasets [here](https://github.com/dmlc/xgboost/blob/0c1d5f1120c0a159f2567b267f0ec4ffadee00d0/jvm-packages/xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/SparkTraining.scala#L66-L78)
### New feature: Additional metric functions for GPUs (#3952)
* Element-wise metrics have been ported to GPU: `rmse`, `mae`, `logloss`, `poisson-nloglik`, `gamma-deviance`, `gamma-nloglik`, `error`, `tweedie-nloglik`. Special thanks to @trivialfis and @RAMitchell.
* With supported metrics, XGBoost will select the correct devices based on your system and the `n_gpus` parameter.
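For instance, assuming a CUDA-enabled build, a configuration of this era might look like the following sketch (note that `n_gpus` was a parameter at the time and has since been removed; the data is synthetic):

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(1000, 20)
y = np.random.randint(0, 2, size=1000)
dtrain = xgb.DMatrix(X, label=y)
params = {
    'objective': 'binary:logistic',
    'tree_method': 'gpu_hist',
    'eval_metric': 'rmse',  # one of the element-wise metrics ported to GPU
    'n_gpus': 1,            # number of devices to use (era-specific parameter)
}
bst = xgb.train(params, dtrain, num_boost_round=10, evals=[(dtrain, 'train')])
```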
### New feature: Column sampling at individual nodes (splits) (#3971)
* Columns (features) can now be sampled at individual tree nodes, in addition to per-tree and per-level sampling. To enable per-node sampling, set the `colsample_bynode` parameter, which represents the fraction of columns sampled at each node. This parameter is set to 1.0 by default (i.e. no sampling per node). Special thanks to @canonizer.
* The `colsample_bynode` parameter works cumulatively with other `colsample_by*` parameters: for example, `{'colsample_bynode':0.5, 'colsample_bytree':0.5}` with 100 columns will give 25 features to choose from at each split.
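To make the cumulative behaviour concrete, here is a minimal sketch on synthetic data:

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(500, 100)
y = np.random.randint(0, 2, size=500)
dtrain = xgb.DMatrix(X, label=y)
params = {
    'objective': 'binary:logistic',
    'colsample_bytree': 0.5,  # 50 of 100 columns sampled per tree
    'colsample_bynode': 0.5,  # half of those again per node: 25 candidates per split
}
bst = xgb.train(params, dtrain, num_boost_round=10)
```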
### Major API change: consistent logging level via `verbosity` (#3982, #4002, #4138)
* XGBoost now allows fine-grained control over logging. You can set `verbosity` to 0 (silent), 1 (warning), 2 (info), or 3 (debug), to control the amount of logging output. Special thanks to @trivialfis.
* Parameters `silent` and `debug_verbose` are now deprecated.
* Note: XGBoost sometimes changes configurations based on heuristics, and reports this as a warning message. If you observe unexpected behaviour, try increasing the verbosity value.
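A minimal sketch of the new logging control on synthetic data (the deprecated `silent`/`debug_verbose` parameters are simply replaced by `verbosity`):

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(200, 5)
y = np.random.rand(200)
dtrain = xgb.DMatrix(X, label=y)
params = {
    'verbosity': 2,  # 0 = silent, 1 = warning, 2 = info, 3 = debug
}
bst = xgb.train(params, dtrain, num_boost_round=5)
```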
### Major bug fix: external memory (#4040, #4193)
* Clarify object ownership in the multi-threaded prefetcher, to avoid memory errors.
* Correctly merge two column batches (which uses [CSC layout](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_(CSC_or_CCS))).
* Add unit tests for external memory.
* Special thanks to @trivialfis and @hcho3.
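For reference, external memory mode is selected through the cache suffix in the data path; a sketch with placeholder file names:

```python
import xgboost as xgb

# The '#' separates the data file from an on-disk cache prefix; XGBoost then
# streams batches from 'dtrain.cache' instead of keeping everything in RAM.
dtrain = xgb.DMatrix('train.libsvm#dtrain.cache')
bst = xgb.train({'objective': 'binary:logistic'}, dtrain, num_boost_round=10)
```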
### Major bug fix: early stopping fixed in XGBoost4J and XGBoost4J-Spark (#3928, #4176)
* Early stopping in XGBoost4J and XGBoost4J-Spark is now consistent with its counterpart in the Python package. Training stops if the current iteration is `earlyStoppingSteps` away from the best iteration. If there are multiple evaluation sets, only the last one is used to determine early stopping.
* See the updated documentation [here](https://xgboost.readthedocs.io/en/release_0.82/jvm/xgboost4j_spark_tutorial.html#early-stopping)
* Special thanks to @CodingCat, @yanboliang, and @mingyang.
### Major bug fix: infrequent features should not crash distributed training (#4045)
* For infrequently occurring features, some partitions may not receive any instance. This scenario used to crash distributed training due to malformed ranges. The problem has now been fixed.
* In practice, one-hot-encoded categorical variables tend to produce rare features, particularly when the cardinality is high.
* Special thanks to @CodingCat.
### Performance improvements
* Faster, more space-efficient radix sorting in `gpu_hist` (#3895)
* Subtraction trick in histogram calculation in `gpu_hist` (#3945)
* More performant re-partition in XGBoost4J-Spark (#4049)
### Bug-fixes
* Fix semantics of `gpu_id` when running multiple XGBoost processes on a multi-GPU machine (#3851)
* Fix page storage path for external memory on Windows (#3869)
* Fix configuration setup so that DART utilizes GPU (#4024)
* Eliminate NAN values from SHAP prediction (#3943)
* Prevent empty quantile sketches in `hist` (#4155)
* Enable running objectives with 0 GPU (#3878)
* Parameters are no longer dependent on system locale (#3891, #3907)
* Use consistent data type in the GPU coordinate descent code (#3917)
* Remove undefined behavior in the CLI config parser on the ARM platform (#3976)
* Initialize counters in GPU AllReduce (#3987)
* Prevent deadlocks in GPU AllReduce (#4113)
* Load correct values from sliced NumPy arrays (#4147, #4165)
* Fix incorrect GPU device selection (#4161)
* Make feature binning logic in `hist` aware of query groups when running a ranking task (#4115). For the ranking task, query groups are weighted, not individual instances.
* Generate correct C++ exception type for `LOG(FATAL)` macro (#4159)
* Python package
  - Python package should run on a system without the `PATH` environment variable (#3845)
- Fix `coef_` and `intercept_` signature to be compatible with `sklearn.RFECV` (#3873)
- Use UTF-8 encoding in Python package README, to support non-English locale (#3867)
  - Add AUC-PR to the list of metrics to maximize for early stopping (#3936); see the sketch after this list
- Allow loading pickles without `self.booster` attribute, for backward compatibility (#3938, #3944)
- White-list DART for feature importances (#4073)
- Update usage of [h2oai/datatable](https://github.com/h2oai/datatable) (#4123)
* XGBoost4J-Spark
- Address scalability issue in prediction (#4033)
- Enforce the use of per-group weights for ranking task (#4118)
- Fix vector size of `rawPredictionCol` in `XGBoostClassificationModel` (#3932)
- More robust error handling in Spark tracker (#4046, #4108)
- Fix return type of `setEvalSets` (#4105)
- Return correct value of `getMaxLeaves` (#4114)
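As referenced in the Python-package bullet above, a minimal early-stopping sketch with the newly white-listed `aucpr` metric (synthetic data):

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(1000, 10)
y = np.random.randint(0, 2, size=1000)
dtrain = xgb.DMatrix(X[:800], label=y[:800])
dvalid = xgb.DMatrix(X[800:], label=y[800:])
params = {'objective': 'binary:logistic', 'eval_metric': 'aucpr'}
# aucpr is recognized as a metric to maximize: training stops once the
# validation score fails to improve for 10 consecutive rounds
bst = xgb.train(params, dtrain, num_boost_round=200,
                evals=[(dvalid, 'validation')], early_stopping_rounds=10)
```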
### API changes
* Add experimental parameter `single_precision_histogram` to use single-precision histograms for the `gpu_hist` algorithm (#3965); see the sketch after this list
* Python package
  - Add option to select the type of feature importances in the scikit-learn interface (#3876)
  - Add `trees_to_dataframe()` method to dump decision trees as a Pandas data frame (#4153)
- Add options to control node shapes in the GraphViz plotting function (#3859)
  - Add `xgb_model` option to `XGBClassifier`, to load a previously saved model (#4092)
- Passing lists into `DMatrix` is now deprecated (#3970)
* XGBoost4J
  - Support multiple feature importance types (#3801)
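A sketch combining two of the Python-facing additions above; `single_precision_histogram` assumes a CUDA-enabled build, `trees_to_dataframe()` requires Pandas, and the data is synthetic:

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(500, 8)
y = np.random.rand(500)
dtrain = xgb.DMatrix(X, label=y)
params = {
    'tree_method': 'gpu_hist',
    'single_precision_histogram': True,  # experimental: accumulate histograms in FP32
}
bst = xgb.train(params, dtrain, num_boost_round=5)
df = bst.trees_to_dataframe()  # decision trees dumped as a Pandas data frame
print(df.head())
```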
### Maintenance: Refactor C++ code for legibility and maintainability
* Refactor `hist` algorithm code and add unit tests (#3836)
* Minor refactoring of split evaluator in `gpu_hist` (#3889)
* Removed unused leaf vector field in the tree model (#3989)
* Simplify the tree representation by combining `TreeModel` and `RegTree` classes (#3995)
* Simplify and harden tree expansion code (#4008, #4015)
* De-duplicate parameter classes in the linear model algorithms (#4013)
* Robust handling of ranges with C++20-style span in `gpu_exact` and `gpu_coord_descent` (#4020, #4029)
* Simplify tree training code (#3825). Also use the Span class for robust handling of ranges.
### Maintenance: testing, continuous integration, build system
* Disallow `std::regex` since it's not supported by GCC 4.8.x (#3870)
* Add multi-GPU tests for coordinate descent algorithm for linear models (#3893, #3974)
* Enforce naming style in Python lint (#3896)
* Refactor Python tests (#3897, #3901): Use pytest exclusively, display full trace upon failure
* Address `DeprecationWarning` when using Python collections (#3909)
* Use correct group for maven site plugin (#3937)
* Jenkins CI is now using on-demand EC2 instances exclusively, due to unreliability of Spot instances (#3948)
* Better GPU performance logging (#3945)
* Fix GPU tests on machines with only 1 GPU (#4053)
* Eliminate CRAN check warnings and notes (#3988)
* Add unit tests for tree serialization (#3989)
* Add unit tests for tree fitting functions in `hist` (#4155)
* Add a unit test for `gpu_exact` algorithm (#4020)
* Correct JVM CMake GPU flag (#4071)
* Fix failing Travis CI on Mac (#4086)
* Speed up Jenkins by not compiling CMake (#4099)
* Analyze C++ and CUDA code using clang-tidy, as part of Jenkins CI pipeline (#4034)
* Fix broken R test: Install Homebrew GCC (#4142)
* Check for empty datasets in GPU unit tests (#4151)
* Fix Windows compilation (#4139)
* Comply with the latest cpplint convention (#4157)
* Fix a unit test in `gpu_hist` (#4158)
* Speed up data generation in Python tests (#4164)
### Usability Improvements
* Add link to [InfoWorld 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html) (#4116)
* Remove outdated AWS YARN tutorial (#3885)
* Document current limitation in number of features (#3886)
* Remove unnecessary warning when `gblinear` is selected (#3888)
* Document a limitation of the CSV parser: headers are not supported (#3934)
* Log training parameters in XGBoost4J-Spark (#4091)
* Clarify early stopping behavior in the scikit-learn interface (#3967)
* Clarify behavior of `max_depth` parameter (#4078)
* Revise Python docstrings for the ranking task (#4121). In particular, weights must be provided per group in the learning-to-rank setting; see the sketch after this list.
* Document parameter `num_parallel_tree` (#4022)
* Add Jenkins status badge (#4090)
* Warn users against using internal functions of `Booster` object (#4066)
* Reformat `benchmark_tree.py` to comply with Python style convention (#4126)
* Clarify a comment in `objectiveTrait` (#4174)
* Fix typos and broken links in documentation (#3890, #3872, #3902, #3919, #3975, #4027, #4156, #4167)
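To make the per-group weight convention concrete, a minimal learning-to-rank sketch (group sizes and weights are arbitrary):

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(8, 3)
y = np.random.randint(0, 3, size=8)  # relevance labels
dtrain = xgb.DMatrix(X, label=y)
dtrain.set_group([3, 5])       # two queries: first 3 rows, then 5 rows
dtrain.set_weight([1.0, 2.0])  # one weight per query group, not per instance
bst = xgb.train({'objective': 'rank:pairwise'}, dtrain, num_boost_round=4)
```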
### Acknowledgement
**Contributors** (in no particular order): Jiaming Yuan (@trivialfis), Hyunsu Cho (@hcho3), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Yanbo Liang (@yanboliang), Andy Adinets (@canonizer), Tong He (@hetong007), Yuan Tang (@terrytangyuan)
**First-time Contributors** (in no particular order): Jelle Zijlstra (@JelleZijlstra), Jiacheng Xu (@jiachengxu), @ajing, Kashif Rasul (@kashif), @theycallhimavi, Joey Gao (@pjgao), Prabakaran Kumaresshan (@nixphix), Huafeng Wang (@huafengw), @lyxthe, Sam Wilkinson (@scwilkinson), Tatsuhito Kato (@stabacov), Shayak Banerjee (@shayakbanerjee), Kodi Arfer (@Kodiologist), @KyleLi1985, Egor Smirnov (@SmirnovEgorRu), @tmitanitky, Pasha Stetsenko (@st-pasha), Kenichi Nagahara (@keni-chi), Abhai Kollara Dilip (@abhaikollara), Patrick Ford (@pford221), @hshujuan, Matthew Jones (@mt-jones), Thejaswi Rao (@teju85), Adam November (@anovember)
**First-time Reviewers** (in no particular order): Mingyang Hu (@mingyang), Theodore Vasiloudis (@thvasilo), Jakub Troszok (@troszok), Rong Ou (@rongou), @Denisevi4, Matthew Jones (@mt-jones), Jeff Kaplan (@jeffdk)
## v0.81 (2018.11.04)
### New feature: feature interaction constraints
* Users are now able to control which features (independent variables) are allowed to interact by specifying feature interaction constraints (#3466).
@@ -474,7 +179,7 @@ This release is packed with many new features and bug fixes.
- Latest master: https://xgboost.readthedocs.io/en/latest
- 0.80 stable: https://xgboost.readthedocs.io/en/release_0.80
- 0.72 stable: https://xgboost.readthedocs.io/en/release_0.72
* Support for per-group weights in ranking objective (#3379)
* Ranking task now uses instance weights (#3379)
* Fix inaccurate decimal parsing (#3546)
* New functionality
- Query ID column support in LIBSVM data files (#2749). This is convenient for performing ranking task in distributed setting.

View File

@@ -1,34 +0,0 @@
find_package(LibR REQUIRED)
message(STATUS "LIBR_CORE_LIBRARY " ${LIBR_CORE_LIBRARY})
file(GLOB_RECURSE R_SOURCES
${CMAKE_CURRENT_LIST_DIR}/src/*.cc
${CMAKE_CURRENT_LIST_DIR}/src/*.c)
# Use object library to expose symbols
add_library(xgboost-r OBJECT ${R_SOURCES})
set(R_DEFINITIONS
-DXGBOOST_STRICT_R_MODE=1
-DXGBOOST_CUSTOMIZE_GLOBAL_PRNG=1
-DDMLC_LOG_BEFORE_THROW=0
-DDMLC_DISABLE_STDIN=1
-DDMLC_LOG_CUSTOMIZE=1
-DRABIT_CUSTOMIZE_MSG_
-DRABIT_STRICT_CXX98_)
target_compile_definitions(xgboost-r
PRIVATE ${R_DEFINITIONS})
target_include_directories(xgboost-r
PRIVATE
${LIBR_INCLUDE_DIRS}
${PROJECT_SOURCE_DIR}/include
${PROJECT_SOURCE_DIR}/dmlc-core/include
${PROJECT_SOURCE_DIR}/rabit/include)
set_target_properties(
xgboost-r PROPERTIES
CXX_STANDARD 11
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
set(XGBOOST_DEFINITIONS "${XGBOOST_DEFINITIONS};${R_DEFINITIONS}" PARENT_SCOPE)
set(XGBOOST_OBJ_SOURCES $<TARGET_OBJECTS:xgboost-r> PARENT_SCOPE)
set(LINKED_LIBRARIES_PRIVATE ${LINKED_LIBRARIES_PRIVATE} ${LIBR_CORE_LIBRARY} PARENT_SCOPE)

View File

@@ -1,8 +1,8 @@
Package: xgboost
Type: Package
Title: Extreme Gradient Boosting
Version: 1.0.0.1
Date: 2019-07-23
Version: 0.81.0.1
Date: 2018-08-13
Authors@R: c(
person("Tianqi", "Chen", role = c("aut"),
email = "tianqi.tchen@gmail.com"),
@@ -52,9 +52,7 @@ Suggests:
vcd (>= 1.3),
testthat,
lintr,
igraph (>= 1.0.1),
jsonlite,
float
igraph (>= 1.0.1)
Depends:
R (>= 3.3.0)
Imports:
@@ -63,5 +61,5 @@ Imports:
data.table (>= 1.9.6),
magrittr (>= 1.5),
stringi (>= 0.5.2)
RoxygenNote: 7.0.2
RoxygenNote: 6.1.0
SystemRequirements: GNU make, C++11

View File

@@ -14,7 +14,7 @@
#' WARNING: side-effects!!! Be aware that these callback functions access and modify things in
#' the environment from which they are called from, which is a fairly uncommon thing to do in R.
#'
#' To write a custom callback closure, make sure you first understand the main concepts about R environments.
#' To write a custom callback closure, make sure you first understand the main concepts about R envoronments.
#' Check either R documentation on \code{\link[base]{environment}} or the
#' \href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
#' book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -
@@ -154,7 +154,7 @@ cb.evaluation.log <- function() {
callback
}
#' Callback closure for resetting the booster's parameters at each iteration.
#' Callback closure for restetting the booster's parameters at each iteration.
#'
#' @param new_params a list where each element corresponds to a parameter that needs to be reset.
#' Each element's value must be either a vector of values of length \code{nrounds}
@@ -470,7 +470,7 @@ cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {
#' to the order of rows in the original dataset. Note that when a custom \code{folds} list is
#' provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
#' non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
#' meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits.
#' meaningful when user-profided folds have overlapping indices as in, e.g., random sampling splits.
#' When some of the indices in the training dataset are not included into user-provided \code{folds},
#' their prediction value would be \code{NA}.
#'
@@ -681,7 +681,7 @@ cb.gblinear.history <- function(sparse=FALSE) {
#' using the \code{cb.gblinear.history()} callback.
#' @param class_index zero-based class index to extract the coefficients for only that
#' specific class in a multinomial multiclass model. When it is NULL, all the
#' coefficients are returned. Has no effect in non-multiclass models.
#' coeffients are returned. Has no effect in non-multiclass models.
#'
#' @return
#' For an \code{xgb.train} result, a matrix (either dense or sparse) with the columns

View File

@@ -145,7 +145,7 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
if (is.null(obj)) {
.Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
} else {
pred <- predict(booster_handle, dtrain, training = TRUE)
pred <- predict(booster_handle, dtrain)
gpair <- obj(pred, dtrain)
.Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
}
@@ -209,14 +209,13 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
if (exists('objective', where = params) &&
is.character(params$objective)) {
# If 'objective' provided in params, assume that y is a classification label
# unless objective is reg:squarederror
if (params$objective != 'reg:squarederror')
# unless objective is reg:linear
if (params$objective != 'reg:linear')
y <- factor(y)
} else {
# If no 'objective' given in params, it means that user either wants to
# use the default 'reg:squarederror' objective or has provided a custom
# obj function. Here, assume classification setting when y has 5 or less
# unique values:
# If no 'objective' given in params, it means that user either wants to use
# the default 'reg:linear' objective or has provided a custom obj function.
# Here, assume classification setting when y has 5 or less unique values:
if (length(unique(y)) <= 5)
y <- factor(y)
}

View File

@@ -51,13 +51,11 @@ is.null.handle <- function(handle) {
# Return a verified to be valid handle out of either xgb.Booster.handle or xgb.Booster
# internal utility function
xgb.get.handle <- function(object) {
if (inherits(object, "xgb.Booster")) {
handle <- object$handle
} else if (inherits(object, "xgb.Booster.handle")) {
handle <- object
} else {
handle <- switch(class(object)[1],
xgb.Booster = object$handle,
xgb.Booster.handle = object,
stop("argument must be of either xgb.Booster or xgb.Booster.handle class")
}
)
if (is.null.handle(handle)) {
stop("invalid xgb.Booster.handle")
}
@@ -83,7 +81,7 @@ xgb.get.handle <- function(object) {
#' its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods
#' should still work for such a model object since those methods would be using
#' \code{xgb.Booster.complete} internally. However, one might find it to be more efficient to call the
#' \code{xgb.Booster.complete} function explicitly once after loading a model as an R-object.
#' \code{xgb.Booster.complete} function explicitely once after loading a model as an R-object.
#' That would prevent further repeated implicit reconstruction of an internal booster model.
#'
#' @return
@@ -97,7 +95,6 @@ xgb.get.handle <- function(object) {
#' saveRDS(bst, "xgb.model.rds")
#'
#' bst1 <- readRDS("xgb.model.rds")
#' if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
#' # the handle is invalid:
#' print(bst1$handle)
#'
@@ -139,8 +136,6 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' @param reshape whether to reshape the vector of predictions to a matrix form when there are several
#' prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
#' or predinteraction flags is TRUE.
#' @param training whether is the prediction result used for training. For dart booster,
#' training predicting will perform dropout.
#' @param ... Parameters passed to \code{predict.xgb.Booster}
#'
#' @details
@@ -167,7 +162,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#'
#' With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
#' are computed. Note that this operation might be rather expensive in terms of compute and memory.
#' Since it quadratically depends on the number of features, it is recommended to perform selection
#' Since it quadratically depends on the number of features, it is recommended to perfom selection
#' of the most important features first. See below about the format of the returned results.
#'
#' @return
@@ -290,7 +285,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' @export
predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FALSE, ntreelimit = NULL,
predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
reshape = FALSE, training = FALSE, ...) {
reshape = FALSE, ...) {
object <- xgb.Booster.complete(object, saveraw = FALSE)
if (!inherits(newdata, "xgb.DMatrix"))
@@ -309,8 +304,7 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + 4L * as.logical(predcontrib) +
8L * as.logical(approxcontrib) + 16L * as.logical(predinteraction)
ret <- .Call(XGBoosterPredict_R, object$handle, newdata, option[1],
as.integer(ntreelimit), as.integer(training))
ret <- .Call(XGBoosterPredict_R, object$handle, newdata, option[1], as.integer(ntreelimit))
n_ret <- length(ret)
n_row <- nrow(newdata)
@@ -424,7 +418,6 @@ predict.xgb.Booster.handle <- function(object, ...) {
#'
#' xgb.save(bst, 'xgb.model')
#' bst1 <- xgb.load('xgb.model')
#' if (file.exists('xgb.model')) file.remove('xgb.model')
#' print(xgb.attr(bst1, "my_attribute"))
#' print(xgb.attributes(bst1))
#'

View File

@@ -19,7 +19,6 @@
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
#' @export
xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, ...) {
cnames <- NULL
@@ -105,7 +104,7 @@ dim.xgb.DMatrix <- function(x) {
#' Handling of column names of \code{xgb.DMatrix}
#'
#' Only column names are supported for \code{xgb.DMatrix}, thus setting of
#' row names would have no effect and returned row names would be NULL.
#' row names would have no effect and returnten row names would be NULL.
#'
#' @param x object of class \code{xgb.DMatrix}
#' @param value a list of two elements: the first one is ignored
@@ -267,10 +266,10 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
#' Get a new DMatrix containing the specified rows of
#' original xgb.DMatrix object
#' orginal xgb.DMatrix object
#'
#' Get a new DMatrix containing the specified rows of
#' original xgb.DMatrix object
#' orginal xgb.DMatrix object
#'
#' @param object Object of class "xgb.DMatrix"
#' @param idxset a integer vector of indices of rows needed
@@ -302,17 +301,12 @@ slice.xgb.DMatrix <- function(object, idxset, ...) {
attr_list <- attributes(object)
nr <- nrow(object)
len <- sapply(attr_list, NROW)
len <- sapply(attr_list, length)
ind <- which(len == nr)
if (length(ind) > 0) {
nms <- names(attr_list)[ind]
for (i in seq_along(ind)) {
obj_attr <- attr(object, nms[i])
if (NCOL(obj_attr) > 1) {
attr(ret, nms[i]) <- obj_attr[idxset,]
} else {
attr(ret, nms[i]) <- obj_attr[idxset]
}
attr(ret, nms[i]) <- attr(object, nms[i])[idxset]
}
}
return(structure(ret, class = "xgb.DMatrix"))

View File

@@ -11,7 +11,6 @@
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
#' @export
xgb.DMatrix.save <- function(dmatrix, fname) {
if (typeof(fname) != "character")

View File

@@ -6,7 +6,7 @@
#' \itemize{
#' \item \code{objective} objective function, common ones are
#' \itemize{
#' \item \code{reg:squarederror} Regression with squared loss
#' \item \code{reg:linear} linear regression
#' \item \code{binary:logistic} logistic regression for classification
#' }
#' \item \code{eta} step size of each boosting step
@@ -39,7 +39,7 @@
#' }
#' @param obj customized objective function. Returns gradient and second order
#' gradient with given prediction and dtrain.
#' @param feval customized evaluation function. Returns
#' @param feval custimized evaluation function. Returns
#' \code{list(metric='metric-name', value='metric-value')} with given
#' prediction and dtrain.
#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified
@@ -47,8 +47,6 @@
#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds
#' (each element must be a vector of test fold's indices). When folds are supplied,
#' the \code{nfold} and \code{stratified} parameters are ignored.
#' @param train_folds \code{list} list specifying which indicies to use for training. If \code{NULL}
#' (the default) all indices not specified in \code{folds} will be used for training.
#' @param verbose \code{boolean}, print the statistics during the process
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
#' Default is 1 which means all messages are printed. This parameter is passed to the
@@ -86,7 +84,7 @@
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
#' \item \code{callbacks} callback functions that were either automatically assigned or
#' explicitly passed.
#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
#' \item \code{evaluation_log} evaluation history storead as a \code{data.table} with the
#' first column corresponding to iteration number and the rest corresponding to the
#' CV-based evaluation means and standard deviations for the training and test CV-sets.
#' It is created by the \code{\link{cb.evaluation.log}} callback.
@@ -116,7 +114,7 @@
#' @export
xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA,
prediction = FALSE, showsd = TRUE, metrics=list(),
obj = NULL, feval = NULL, stratified = TRUE, folds = NULL, train_folds = NULL,
obj = NULL, feval = NULL, stratified = TRUE, folds = NULL,
verbose = TRUE, print_every_n=1L,
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) {
@@ -135,15 +133,8 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
# Check the labels
if ( (inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
(!inherits(data, 'xgb.DMatrix') && is.null(label))) {
(!inherits(data, 'xgb.DMatrix') && is.null(label)))
stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix")
} else if (inherits(data, 'xgb.DMatrix')) {
if (!is.null(label))
warning("xgb.cv: label will be ignored, since data is of type xgb.DMatrix")
cv_label = getinfo(data, 'label')
} else {
cv_label = label
}
# CV folds
if(!is.null(folds)) {
@@ -153,7 +144,7 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
} else {
if (nfold <= 1)
stop("'nfold' must be > 1")
folds <- generate.cv.folds(nfold, nrow(data), stratified, cv_label, params)
folds <- generate.cv.folds(nfold, nrow(data), stratified, label, params)
}
# Potential TODO: sequential CV
@@ -188,15 +179,10 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
# create the booster-folds
# train_folds
dall <- xgb.get.DMatrix(data, label, missing)
bst_folds <- lapply(seq_along(folds), function(k) {
dtest <- slice(dall, folds[[k]])
# code originally contributed by @RolandASc on stackoverflow
if(is.null(train_folds))
dtrain <- slice(dall, unlist(folds[-k]))
else
dtrain <- slice(dall, train_folds[[k]])
handle <- xgb.Booster.handle(params, list(dtrain, dtest))
list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test=dtest), index = folds[[k]])
})

View File

@@ -28,7 +28,6 @@
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#' xgb.save(bst, 'xgb.model')
#' bst <- xgb.load('xgb.model')
#' if (file.exists('xgb.model')) file.remove('xgb.model')
#' pred <- predict(bst, test$data)
#' @export
xgb.load <- function(modelfile) {

View File

@@ -27,7 +27,7 @@
#' a tree's median absolute leaf weight changes through the iterations.
#'
#' This function was inspired by the blog post
#' \url{https://github.com/aysent/random-forest-leaf-visualization}.
#' \url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}.
#'
#' @return
#'

View File

@@ -30,8 +30,8 @@
#' Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
#' "what is feature's importance contribution relative to the most important feature?"
#'
#' The ggplot-backend method also performs 1-D clustering of the importance values,
#' with bar colors corresponding to different clusters that have somewhat similar importance values.
#' The ggplot-backend method also performs 1-D custering of the importance values,
#' with bar colors coresponding to different clusters that have somewhat similar importance values.
#'
#' @return
#' The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE})

View File

@@ -31,7 +31,7 @@
#' @param plot_loess whether to plot loess-smoothed curves. The smoothing is only done for features with
#' more than 5 distinct values.
#' @param col_loess a color to use for the loess curves.
#' @param span_loess the \code{span} parameter in \code{\link[stats]{loess}}'s call.
#' @param span_loess the \code{span} paramerer in \code{\link[stats]{loess}}'s call.
#' @param which whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.
#' @param plot whether a plot should be drawn. If FALSE, only a lits of matrices is returned.
#' @param ... other parameters passed to \code{plot}.

View File

@@ -27,7 +27,6 @@
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#' xgb.save(bst, 'xgb.model')
#' bst <- xgb.load('xgb.model')
#' if (file.exists('xgb.model')) file.remove('xgb.model')
#' pred <- predict(bst, test$data)
#' @export
xgb.save <- function(model, fname) {

View File

@@ -42,7 +42,7 @@
#' \itemize{
#' \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
#' \itemize{
#' \item \code{reg:squarederror} Regression with squared loss (Default).
#' \item \code{reg:linear} linear regression (Default).
#' \item \code{reg:logistic} logistic regression.
#' \item \code{binary:logistic} logistic regression for binary classification. Output probability.
#' \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
@@ -68,7 +68,7 @@
#' the performance of each round's model on mat1 and mat2.
#' @param obj customized objective function. Returns gradient and second order
#' gradient with given prediction and dtrain.
#' @param feval customized evaluation function. Returns
#' @param feval custimized evaluation function. Returns
#' \code{list(metric='metric-name', value='metric-value')} with given
#' prediction and dtrain.
#' @param verbose If 0, xgboost will stay silent. If 1, it will print information about performance.
@@ -118,7 +118,7 @@
#' when the \code{eval_metric} parameter is not provided.
#' User may set one or several \code{eval_metric} parameters.
#' Note that when using a customized metric, only this single metric can be used.
#' The following is the list of built-in metrics for which Xgboost provides optimized implementation:
#' The folloiwing is the list of built-in metrics for which Xgboost provides optimized implementation:
#' \itemize{
#' \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
#' \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
@@ -147,14 +147,14 @@
#' \item \code{handle} a handle (pointer) to the xgboost model in memory.
#' \item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
#' \item \code{niter} number of boosting iterations.
#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
#' \item \code{evaluation_log} evaluation history storead as a \code{data.table} with the
#' first column corresponding to iteration number and the rest corresponding to evaluation
#' metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
#' \item \code{call} a function call.
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
#' \item \code{callbacks} callback functions that were either automatically assigned or
#' explicitly passed.
#' explicitely passed.
#' \item \code{best_iteration} iteration number with the best evaluation metric value
#' (only available with early stopping).
#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
@@ -163,7 +163,7 @@
#' \item \code{best_score} the best evaluation metric value during early stopping.
#' (only available with early stopping).
#' \item \code{feature_names} names of the training dataset features
#' (only when column names were defined in training data).
#' (only when comun names were defined in training data).
#' \item \code{nfeatures} number of features in training data.
#' }
#'
@@ -186,7 +186,7 @@
#' watchlist <- list(train = dtrain, eval = dtest)
#'
#' ## A simple xgb.train example:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
#' param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
#' objective = "binary:logistic", eval_metric = "auc")
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
#'
@@ -207,12 +207,12 @@
#'
#' # These functions could be used by passing them either:
#' # as 'objective' and 'eval_metric' parameters in the params list:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
#' param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
#' objective = logregobj, eval_metric = evalerror)
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
#'
#' # or through the ... arguments:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2)
#' param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2)
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
#' objective = logregobj, eval_metric = evalerror)
#'
@@ -222,7 +222,7 @@
#'
#'
#' ## An xgb.train example of using variable learning rates at each iteration:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
#' param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
#' objective = "binary:logistic", eval_metric = "auc")
#' my_etas <- list(eta = c(0.5, 0.1))
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
@@ -293,9 +293,6 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
}
# Sort the callbacks into categories
cb <- categorize.callbacks(callbacks)
if (!is.null(params[['seed']])) {
warning("xgb.train: `seed` is ignored in R package. Use `set.seed()` instead.")
}
# The tree updating process would need slightly different handling
is_update <- NVL(params[['process_type']], '.') == 'update'

View File

@@ -30,4 +30,4 @@ Examples
Development
-----------
* See the [R Package section](https://xgboost.readthedocs.io/en/latest/contribute.html#r-package) of the contributors guide.
* See the [R Package section](https://xgboost.readthedocs.io/en/latest/how_to/contribute.html#r-package) of the contributors guide.

View File

@@ -1,4 +1,3 @@
#!/bin/sh
rm -f src/Makevars
rm -f CMakeLists.txt

R-package/configure (vendored, 1045 lines changed)

File diff suppressed because it is too large

View File

@@ -4,52 +4,28 @@ AC_PREREQ(2.62)
AC_INIT([xgboost],[0.6-3],[],[xgboost],[])
# Use this line to set CC variable to a C compiler
AC_PROG_CC
### Check whether backtrace() is part of libc or the external lib libexecinfo
AC_MSG_CHECKING([Backtrace lib])
AC_MSG_RESULT([])
AC_CHECK_LIB([execinfo], [backtrace], [BACKTRACE_LIB=-lexecinfo], [BACKTRACE_LIB=''])
### Endian detection
AC_MSG_CHECKING([endian])
AC_MSG_RESULT([])
AC_RUN_IFELSE([AC_LANG_PROGRAM([[#include <stdint.h>]], [[const uint16_t endianness = 256; return !!(*(const uint8_t *)&endianness);]])],
[ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=1"],
[ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=0"])
OPENMP_CXXFLAGS=""
if test `uname -s` = "Linux"
then
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
fi
if test `uname -s` = "Darwin"
then
OPENMP_CXXFLAGS='-Xclang -fopenmp'
OPENMP_LIB='/usr/local/lib/libomp.dylib'
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
ac_pkg_openmp=no
AC_MSG_CHECKING([whether OpenMP will work in a package])
AC_LANG_CONFTEST([AC_LANG_PROGRAM([[#include <omp.h>]], [[ return (omp_get_max_threads() <= 1); ]])])
${CC} -o conftest conftest.c /usr/local/lib/libomp.dylib -Xclang -fopenmp 2>/dev/null && ./conftest && ac_pkg_openmp=yes
AC_LANG_CONFTEST(
[AC_LANG_PROGRAM([[#include <omp.h>]], [[ return omp_get_num_threads (); ]])])
PKG_CFLAGS="${OPENMP_CFLAGS}" PKG_LIBS="${OPENMP_CFLAGS}" "$RBIN" CMD SHLIB conftest.c 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD && "$RBIN" --vanilla -q -e "dyn.load(paste('conftest',.Platform\$dynlib.ext,sep=''))" 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD && ac_pkg_openmp=yes
AC_MSG_RESULT([${ac_pkg_openmp}])
if test "${ac_pkg_openmp}" = no; then
OPENMP_CXXFLAGS=''
OPENMP_LIB=''
echo '*****************************************************************************************'
echo 'WARNING: OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
echo ' To use all CPU cores for training jobs, you should install OpenMP by running\n'
echo ' brew install libomp'
echo '*****************************************************************************************'
fi
fi
AC_SUBST(OPENMP_CXXFLAGS)
AC_SUBST(OPENMP_LIB)
AC_SUBST(ENDIAN_FLAG)
AC_SUBST(BACKTRACE_LIB)
AC_CONFIG_FILES([src/Makevars])
AC_OUTPUT

View File

@@ -33,7 +33,7 @@ evalerror <- function(preds, dtrain) {
return(list(metric = "error", value = err))
}
param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
param <- list(max_depth=2, eta=1, nthread = 2, silent=1,
objective=logregobj, eval_metric=evalerror)
print ('start training with user customized objective')
# training with customized objective, we can also do step by step training
@@ -57,7 +57,7 @@ logregobjattr <- function(preds, dtrain) {
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
param <- list(max_depth=2, eta=1, nthread = 2, silent=1,
objective=logregobjattr, eval_metric=evalerror)
print ('start training with user customized objective, with additional attributes in DMatrix')
# training with customized objective, we can also do step by step training

View File

@@ -7,7 +7,7 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# note: for customized objective function, we leave objective as default
# note: what we are getting is margin value in prediction
# you must know what you are doing
param <- list(max_depth=2, eta=1, nthread=2, verbosity=0)
param <- list(max_depth=2, eta=1, nthread = 2, silent=1)
watchlist <- list(eval = dtest)
num_round <- 20
# user define objective function, given prediction, return gradient and second order gradient

View File

@@ -30,7 +30,7 @@ wl <- list(train = dtrain, test = dtest)
# - similar to the 'hist'
# - the fastest option for moderately large datasets
# - current limitations: max_depth < 16, does not implement guided loss
# You can use tree_method = 'gpu_hist' for another GPU accelerated algorithm,
# You can use tree_method = 'gpu_exact' for another GPU accelerated algorithm,
# which is slower, more memory-hungry, but does not use binning.
param <- list(objective = 'reg:logistic', eval_metric = 'auc', subsample = 0.5, nthread = 4,
max_bin = 64, tree_method = 'gpu_hist')

View File

@@ -38,7 +38,6 @@ create.new.tree.features <- function(model, original.features){
# Convert previous features to one hot encoding
new.features.train <- create.new.tree.features(bst, agaricus.train$data)
new.features.test <- create.new.tree.features(bst, agaricus.test$data)
colnames(new.features.test) <- colnames(new.features.train)
# learning with new features
new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)

View File

@@ -18,7 +18,7 @@ the boosting is completed.
WARNING: side-effects!!! Be aware that these callback functions access and modify things in
the environment from which they are called from, which is a fairly uncommon thing to do in R.
To write a custom callback closure, make sure you first understand the main concepts about R environments.
To write a custom callback closure, make sure you first understand the main concepts about R envoronments.
Check either R documentation on \code{\link[base]{environment}} or the
\href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -

View File

@@ -15,7 +15,7 @@ depending on the number of prediction outputs per data row. The order of predict
to the order of rows in the original dataset. Note that when a custom \code{folds} list is
provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits.
meaningful when user-profided folds have overlapping indices as in, e.g., random sampling splits.
When some of the indices in the training dataset are not included into user-provided \code{folds},
their prediction value would be \code{NA}.
}

View File

@@ -4,12 +4,8 @@
\alias{cb.early.stop}
\title{Callback closure to activate the early stopping.}
\usage{
cb.early.stop(
stopping_rounds,
maximize = FALSE,
metric_name = NULL,
verbose = TRUE
)
cb.early.stop(stopping_rounds, maximize = FALSE, metric_name = NULL,
verbose = TRUE)
}
\arguments{
\item{stopping_rounds}{The number of rounds with no improvement in

View File

@@ -2,7 +2,7 @@
% Please edit documentation in R/callbacks.R
\name{cb.reset.parameters}
\alias{cb.reset.parameters}
\title{Callback closure for resetting the booster's parameters at each iteration.}
\title{Callback closure for restetting the booster's parameters at each iteration.}
\usage{
cb.reset.parameters(new_params)
}
@@ -15,7 +15,7 @@ which returns a new parameter value by using the current iteration number
and the total number of boosting rounds.}
}
\description{
Callback closure for resetting the booster's parameters at each iteration.
Callback closure for restetting the booster's parameters at each iteration.
}
\details{
This is a "pre-iteration" callback function used to reset booster's parameters

View File

@@ -17,7 +17,7 @@ and the second one is column names}
}
\description{
Only column names are supported for \code{xgb.DMatrix}, thus setting of
row names would have no effect and returned row names would be NULL.
row names would have no effect and returnten row names would be NULL.
}
\details{
Generic \code{dimnames} methods are used by \code{colnames}.

View File

@@ -5,20 +5,10 @@
\alias{predict.xgb.Booster.handle}
\title{Predict method for eXtreme Gradient Boosting model}
\usage{
\method{predict}{xgb.Booster}(
object,
newdata,
missing = NA,
outputmargin = FALSE,
ntreelimit = NULL,
predleaf = FALSE,
predcontrib = FALSE,
approxcontrib = FALSE,
predinteraction = FALSE,
reshape = FALSE,
training = FALSE,
...
)
\method{predict}{xgb.Booster}(object, newdata, missing = NA,
outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE,
predcontrib = FALSE, approxcontrib = FALSE,
predinteraction = FALSE, reshape = FALSE, ...)
\method{predict}{xgb.Booster.handle}(object, ...)
}
@@ -49,9 +39,6 @@ It will use all the trees by default (\code{NULL} value).}
prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
or predinteraction flags is TRUE.}
\item{training}{whether is the prediction result used for training. For dart booster,
training predicting will perform dropout.}
\item{...}{Parameters passed to \code{predict.xgb.Booster}}
}
\value{
@@ -104,7 +91,7 @@ in \url{http://blog.datadive.net/interpreting-random-forests/}.
With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
are computed. Note that this operation might be rather expensive in terms of compute and memory.
Since it quadratically depends on the number of features, it is recommended to perform selection
Since it quadratically depends on the number of features, it is recommended to perfom selection
of the most important features first. See below about the format of the returned results.
}
\examples{

View File

@@ -5,7 +5,7 @@
\alias{slice.xgb.DMatrix}
\alias{[.xgb.DMatrix}
\title{Get a new DMatrix containing the specified rows of
original xgb.DMatrix object}
orginal xgb.DMatrix object}
\usage{
slice(object, ...)
@@ -24,7 +24,7 @@ slice(object, ...)
}
\description{
Get a new DMatrix containing the specified rows of
original xgb.DMatrix object
orginal xgb.DMatrix object
}
\examples{
data(agaricus.train, package='xgboost')

View File

@@ -28,7 +28,7 @@ E.g., when an \code{xgb.Booster} model is saved as an R object and then is loade
its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods
should still work for such a model object since those methods would be using
\code{xgb.Booster.complete} internally. However, one might find it to be more efficient to call the
\code{xgb.Booster.complete} function explicitly once after loading a model as an R-object.
\code{xgb.Booster.complete} function explicitely once after loading a model as an R-object.
That would prevent further repeated implicit reconstruction of an internal booster model.
}
\examples{
@@ -39,7 +39,6 @@ bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_dep
saveRDS(bst, "xgb.model.rds")
bst1 <- readRDS("xgb.model.rds")
if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
# the handle is invalid:
print(bst1$handle)

View File

@@ -31,5 +31,4 @@ train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
dtrain <- xgb.DMatrix('xgb.DMatrix.data')
if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
}

View File

@@ -20,5 +20,4 @@ train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
dtrain <- xgb.DMatrix('xgb.DMatrix.data')
if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
}

View File

@@ -73,7 +73,6 @@ xgb.attributes(bst) <- list(a = 123, b = "abc")
xgb.save(bst, 'xgb.model')
bst1 <- xgb.load('xgb.model')
if (file.exists('xgb.model')) file.remove('xgb.model')
print(xgb.attr(bst1, "my_attribute"))
print(xgb.attributes(bst1))

View File

@@ -87,6 +87,6 @@ accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) /
# Here the accuracy was already good and is now perfect.
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
accuracy.after, "!\n"))
accuracy.after, "!\\n"))
}

View File

@@ -4,35 +4,19 @@
\alias{xgb.cv}
\title{Cross Validation}
\usage{
xgb.cv(
params = list(),
data,
nrounds,
nfold,
label = NULL,
missing = NA,
prediction = FALSE,
showsd = TRUE,
metrics = list(),
obj = NULL,
feval = NULL,
stratified = TRUE,
folds = NULL,
train_folds = NULL,
verbose = TRUE,
print_every_n = 1L,
early_stopping_rounds = NULL,
maximize = NULL,
callbacks = list(),
...
)
xgb.cv(params = list(), data, nrounds, nfold, label = NULL,
missing = NA, prediction = FALSE, showsd = TRUE,
metrics = list(), obj = NULL, feval = NULL, stratified = TRUE,
folds = NULL, verbose = TRUE, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(),
...)
}
\arguments{
\item{params}{the list of parameters. Commonly used ones are:
\itemize{
\item \code{objective} objective function, common ones are
\itemize{
\item \code{reg:squarederror} Regression with squared loss
\item \code{reg:linear} linear regression
\item \code{binary:logistic} logistic regression for classification
}
\item \code{eta} step size of each boosting step
@@ -75,7 +59,7 @@ from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callb
\item{obj}{customized objective function. Returns gradient and second order
gradient with given prediction and dtrain.}
\item{feval}{customized evaluation function. Returns
\item{feval}{custimized evaluation function. Returns
\code{list(metric='metric-name', value='metric-value')} with given
prediction and dtrain.}
@@ -86,9 +70,6 @@ by the values of outcome labels.}
(each element must be a vector of test fold's indices). When folds are supplied,
the \code{nfold} and \code{stratified} parameters are ignored.}
\item{train_folds}{\code{list} list specifying which indicies to use for training. If \code{NULL}
(the default) all indices not specified in \code{folds} will be used for training.}
\item{verbose}{\code{boolean}, print the statistics during the process}
\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}.
@@ -120,7 +101,7 @@ An object of class \code{xgb.cv.synchronous} with the following elements:
capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
\item \code{callbacks} callback functions that were either automatically assigned or
explicitly passed.
\item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
\item \code{evaluation_log} evaluation history storead as a \code{data.table} with the
first column corresponding to iteration number and the rest corresponding to the
CV-based evaluation means and standard deviations for the training and test CV-sets.
It is created by the \code{\link{cb.evaluation.log}} callback.

View File

@@ -4,14 +4,8 @@
\alias{xgb.dump}
\title{Dump an xgboost model in text format.}
\usage{
xgb.dump(
model,
fname = NULL,
fmap = "",
with_stats = FALSE,
dump_format = c("text", "json"),
...
)
xgb.dump(model, fname = NULL, fmap = "", with_stats = FALSE,
dump_format = c("text", "json"), ...)
}
\arguments{
\item{model}{the model object.}

View File

@@ -12,7 +12,7 @@ using the \code{cb.gblinear.history()} callback.}
\item{class_index}{zero-based class index to extract the coefficients for only that
specific class in a multinomial multiclass model. When it is NULL, all the
coefficients are returned. Has no effect in non-multiclass models.}
coeffients are returned. Has no effect in non-multiclass models.}
}
\value{
For an \code{xgb.train} result, a matrix (either dense or sparse) with the columns

View File

@@ -4,14 +4,8 @@
\alias{xgb.importance}
\title{Importance of features in a model.}
\usage{
xgb.importance(
feature_names = NULL,
model = NULL,
trees = NULL,
data = NULL,
label = NULL,
target = NULL
)
xgb.importance(feature_names = NULL, model = NULL, trees = NULL,
data = NULL, label = NULL, target = NULL)
}
\arguments{
\item{feature_names}{character vector of feature names. If the model already

View File

@@ -33,7 +33,6 @@ bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
xgb.save(bst, 'xgb.model')
bst <- xgb.load('xgb.model')
if (file.exists('xgb.model')) file.remove('xgb.model')
pred <- predict(bst, test$data)
}
\seealso{

View File

@@ -4,14 +4,8 @@
\alias{xgb.model.dt.tree}
\title{Parse a boosted tree model text dump}
\usage{
xgb.model.dt.tree(
feature_names = NULL,
model = NULL,
text = NULL,
trees = NULL,
use_int_id = FALSE,
...
)
xgb.model.dt.tree(feature_names = NULL, model = NULL, text = NULL,
trees = NULL, use_int_id = FALSE, ...)
}
\arguments{
\item{feature_names}{character vector of feature names. If the model already

View File

@@ -5,17 +5,11 @@
\alias{xgb.plot.deepness}
\title{Plot model trees deepness}
\usage{
xgb.ggplot.deepness(
model = NULL,
which = c("2x1", "max.depth", "med.depth", "med.weight")
)
xgb.ggplot.deepness(model = NULL, which = c("2x1", "max.depth",
"med.depth", "med.weight"))
xgb.plot.deepness(
model = NULL,
which = c("2x1", "max.depth", "med.depth", "med.weight"),
plot = TRUE,
...
)
xgb.plot.deepness(model = NULL, which = c("2x1", "max.depth",
"med.depth", "med.weight"), plot = TRUE, ...)
}
\arguments{
\item{model}{either an \code{xgb.Booster} model generated by the \code{xgb.train} function
@@ -56,7 +50,7 @@ per tree with respect to tree number are created. And \code{which="med.weight"}
a tree's median absolute leaf weight changes through the iterations.
This function was inspired by the blog post
\url{https://github.com/aysent/random-forest-leaf-visualization}.
\url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}.
}
\examples{

View File

@@ -5,25 +5,12 @@
\alias{xgb.plot.importance}
\title{Plot feature importance as a bar graph}
\usage{
xgb.ggplot.importance(
importance_matrix = NULL,
top_n = NULL,
measure = NULL,
rel_to_first = FALSE,
n_clusters = c(1:10),
...
)
xgb.ggplot.importance(importance_matrix = NULL, top_n = NULL,
measure = NULL, rel_to_first = FALSE, n_clusters = c(1:10), ...)
xgb.plot.importance(
importance_matrix = NULL,
top_n = NULL,
measure = NULL,
rel_to_first = FALSE,
left_margin = 10,
cex = NULL,
plot = TRUE,
...
)
xgb.plot.importance(importance_matrix = NULL, top_n = NULL,
measure = NULL, rel_to_first = FALSE, left_margin = 10,
cex = NULL, plot = TRUE, ...)
}
\arguments{
\item{importance_matrix}{a \code{data.table} returned by \code{\link{xgb.importance}}.}
@@ -72,8 +59,8 @@ For linear models, \code{rel_to_first = FALSE} would show actual values of the c
Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
"what is feature's importance contribution relative to the most important feature?"
The ggplot-backend method also performs 1-D clustering of the importance values,
with bar colors corresponding to different clusters that have somewhat similar importance values.
The ggplot-backend method also performs 1-D custering of the importance values,
with bar colors coresponding to different clusters that have somewhat similar importance values.
}
\examples{
data(agaricus.train)

View File

@@ -4,15 +4,8 @@
\alias{xgb.plot.multi.trees}
\title{Project all trees on one tree and plot it}
\usage{
xgb.plot.multi.trees(
model,
feature_names = NULL,
features_keep = 5,
plot_width = NULL,
plot_height = NULL,
render = TRUE,
...
)
xgb.plot.multi.trees(model, feature_names = NULL, features_keep = 5,
plot_width = NULL, plot_height = NULL, render = TRUE, ...)
}
\arguments{
\item{model}{produced by the \code{xgb.train} function.}

View File

@@ -4,33 +4,13 @@
\alias{xgb.plot.shap}
\title{SHAP contribution dependency plots}
\usage{
xgb.plot.shap(
data,
shap_contrib = NULL,
features = NULL,
top_n = 1,
model = NULL,
trees = NULL,
target_class = NULL,
approxcontrib = FALSE,
subsample = NULL,
n_col = 1,
col = rgb(0, 0, 1, 0.2),
pch = ".",
discrete_n_uniq = 5,
discrete_jitter = 0.01,
ylab = "SHAP",
plot_NA = TRUE,
col_NA = rgb(0.7, 0, 1, 0.6),
pch_NA = ".",
pos_NA = 1.07,
plot_loess = TRUE,
col_loess = 2,
span_loess = 0.5,
which = c("1d", "2d"),
plot = TRUE,
...
)
xgb.plot.shap(data, shap_contrib = NULL, features = NULL, top_n = 1,
model = NULL, trees = NULL, target_class = NULL,
approxcontrib = FALSE, subsample = NULL, n_col = 1, col = rgb(0,
0, 1, 0.2), pch = ".", discrete_n_uniq = 5, discrete_jitter = 0.01,
ylab = "SHAP", plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6),
pch_NA = ".", pos_NA = 1.07, plot_loess = TRUE, col_loess = 2,
span_loess = 0.5, which = c("1d", "2d"), plot = TRUE, ...)
}
\arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.}
@@ -83,7 +63,7 @@ more than 5 distinct values.}
\item{col_loess}{a color to use for the loess curves.}
\item{span_loess}{the \code{span} parameter in \code{\link[stats]{loess}}'s call.}
\item{span_loess}{the \code{span} paramerer in \code{\link[stats]{loess}}'s call.}
\item{which}{whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.}


@@ -4,16 +4,9 @@
\alias{xgb.plot.tree}
\title{Plot a boosted tree model}
\usage{
xgb.plot.tree(
feature_names = NULL,
model = NULL,
trees = NULL,
plot_width = NULL,
plot_height = NULL,
render = TRUE,
show_node_id = FALSE,
...
)
xgb.plot.tree(feature_names = NULL, model = NULL, trees = NULL,
plot_width = NULL, plot_height = NULL, render = TRUE,
show_node_id = FALSE, ...)
}
\arguments{
\item{feature_names}{names of each feature as a \code{character} vector.}


@@ -33,7 +33,6 @@ bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
xgb.save(bst, 'xgb.model')
bst <- xgb.load('xgb.model')
if (file.exists('xgb.model')) file.remove('xgb.model')
pred <- predict(bst, test$data)
}
\seealso{


@@ -5,41 +5,17 @@
\alias{xgboost}
\title{eXtreme Gradient Boosting Training}
\usage{
xgb.train(
params = list(),
data,
nrounds,
watchlist = list(),
obj = NULL,
feval = NULL,
verbose = 1,
print_every_n = 1L,
early_stopping_rounds = NULL,
maximize = NULL,
save_period = NULL,
save_name = "xgboost.model",
xgb_model = NULL,
callbacks = list(),
...
)
xgb.train(params = list(), data, nrounds, watchlist = list(),
obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(),
...)
xgboost(
data = NULL,
label = NULL,
missing = NA,
weight = NULL,
params = list(),
nrounds,
verbose = 1,
print_every_n = 1L,
early_stopping_rounds = NULL,
maximize = NULL,
save_period = NULL,
save_name = "xgboost.model",
xgb_model = NULL,
callbacks = list(),
...
)
xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
params = list(), nrounds, verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(),
...)
}
\arguments{
\item{params}{the list of parameters.
@@ -65,7 +41,6 @@ xgboost(
\item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
\item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
\item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
\item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
}
2.2. Parameter for Linear Booster
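
The `monotone_constraints` and `interaction_constraints` entries listed above for the tree booster are easiest to read with a small example. A minimal R sketch, with hypothetical features and data that are not part of this diff:

```r
library(xgboost)
set.seed(1)
x <- matrix(rnorm(300), ncol = 3)   # three features; constraint indices count from 0
y <- rbinom(100, 1, 0.5)
bst <- xgboost(
  data = x, label = y, nrounds = 5, verbose = 0,
  objective = "binary:logistic",
  monotone_constraints = c(1, 0, -1),            # increasing, unconstrained, decreasing
  interaction_constraints = list(c(0, 1), c(2))  # features 0 and 1 may interact; feature 2 only by itself
)
```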
@@ -81,7 +56,7 @@ xgboost(
\itemize{
\item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
\itemize{
\item \code{reg:squarederror} Regression with squared loss (Default).
\item \code{reg:linear} linear regression (Default).
\item \code{reg:logistic} logistic regression.
\item \code{binary:logistic} logistic regression for binary classification. Output probability.
\item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
@@ -111,7 +86,7 @@ the performance of each round's model on mat1 and mat2.}
\item{obj}{customized objective function. Returns gradient and second order
gradient with given prediction and dtrain.}
\item{feval}{customized evaluation function. Returns
\item{feval}{custimized evaluation function. Returns
\code{list(metric='metric-name', value='metric-value')} with given
prediction and dtrain.}
@@ -165,14 +140,14 @@ An object of class \code{xgb.Booster} with the following elements:
\item \code{handle} a handle (pointer) to the xgboost model in memory.
\item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
\item \code{niter} number of boosting iterations.
\item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
\item \code{evaluation_log} evaluation history storead as a \code{data.table} with the
first column corresponding to iteration number and the rest corresponding to evaluation
metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
\item \code{call} a function call.
\item \code{params} parameters that were passed to the xgboost library. Note that it does not
capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
\item \code{callbacks} callback functions that were either automatically assigned or
explicitly passed.
explicitely passed.
\item \code{best_iteration} iteration number with the best evaluation metric value
(only available with early stopping).
\item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
@@ -181,7 +156,7 @@ An object of class \code{xgb.Booster} with the following elements:
\item \code{best_score} the best evaluation metric value during early stopping.
(only available with early stopping).
\item \code{feature_names} names of the training dataset features
(only when column names were defined in training data).
(only when comun names were defined in training data).
\item \code{nfeatures} number of features in training data.
}
}
@@ -203,7 +178,7 @@ The evaluation metric is chosen automatically by Xgboost (according to the objec
when the \code{eval_metric} parameter is not provided.
User may set one or several \code{eval_metric} parameters.
Note that when using a customized metric, only this single metric can be used.
The following is the list of built-in metrics for which Xgboost provides optimized implementation:
The folloiwing is the list of built-in metrics for which Xgboost provides optimized implementation:
\itemize{
\item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
\item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
@@ -235,7 +210,7 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
watchlist <- list(train = dtrain, eval = dtest)
## A simple xgb.train example:
param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
objective = "binary:logistic", eval_metric = "auc")
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
@@ -256,12 +231,12 @@ evalerror <- function(preds, dtrain) {
# These functions could be used by passing them either:
# as 'objective' and 'eval_metric' parameters in the params list:
param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
objective = logregobj, eval_metric = evalerror)
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
# or through the ... arguments:
param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2)
param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2)
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
objective = logregobj, eval_metric = evalerror)
@@ -271,7 +246,7 @@ bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
## An xgb.train example of using variable learning rates at each iteration:
param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
objective = "binary:logistic", eval_metric = "auc")
my_etas <- list(eta = c(0.5, 0.1))
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,


@@ -17,8 +17,8 @@ endif
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ @ENDIAN_FLAG@ -pthread
PKG_LIBS = @OPENMP_CXXFLAGS@ @OPENMP_LIB@ @ENDIAN_FLAG@ @BACKTRACE_LIB@ -pthread
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = @OPENMP_CXXFLAGS@ $(SHLIB_PTHREAD_FLAGS)
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o


@@ -29,8 +29,8 @@ endif
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
PKG_CXXFLAGS= $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_CXXFLAGS= $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o


@@ -24,7 +24,7 @@ extern SEXP XGBoosterGetAttr_R(SEXP, SEXP);
extern SEXP XGBoosterLoadModelFromRaw_R(SEXP, SEXP);
extern SEXP XGBoosterLoadModel_R(SEXP, SEXP);
extern SEXP XGBoosterModelToRaw_R(SEXP);
extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterSaveModel_R(SEXP, SEXP);
extern SEXP XGBoosterSetAttr_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterSetParam_R(SEXP, SEXP, SEXP);
@@ -50,7 +50,7 @@ static const R_CallMethodDef CallEntries[] = {
{"XGBoosterLoadModelFromRaw_R", (DL_FUNC) &XGBoosterLoadModelFromRaw_R, 2},
{"XGBoosterLoadModel_R", (DL_FUNC) &XGBoosterLoadModel_R, 2},
{"XGBoosterModelToRaw_R", (DL_FUNC) &XGBoosterModelToRaw_R, 1},
{"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 5},
{"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 4},
{"XGBoosterSaveModel_R", (DL_FUNC) &XGBoosterSaveModel_R, 2},
{"XGBoosterSetAttr_R", (DL_FUNC) &XGBoosterSetAttr_R, 3},
{"XGBoosterSetParam_R", (DL_FUNC) &XGBoosterSetParam_R, 3},
@@ -70,7 +70,7 @@ static const R_CallMethodDef CallEntries[] = {
#if defined(_WIN32)
__declspec(dllexport)
#endif // defined(_WIN32)
#endif
void R_init_xgboost(DllInfo *dll) {
R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
R_useDynamicSymbols(dll, FALSE);


@@ -136,10 +136,9 @@ SEXP XGDMatrixSliceDMatrix_R(SEXP handle, SEXP idxset) {
idxvec[i] = INTEGER(idxset)[i] - 1;
}
DMatrixHandle res;
CHECK_CALL(XGDMatrixSliceDMatrixEx(R_ExternalPtrAddr(handle),
CHECK_CALL(XGDMatrixSliceDMatrix(R_ExternalPtrAddr(handle),
BeginPtr(idxvec), len,
&res,
0));
&res));
ret = PROTECT(R_MakeExternalPtr(res, R_NilValue, R_NilValue));
R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
R_API_END();
@@ -166,9 +165,7 @@ SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array) {
for (int i = 0; i < len; ++i) {
vec[i] = static_cast<unsigned>(INTEGER(array)[i]);
}
CHECK_CALL(XGDMatrixSetUIntInfo(R_ExternalPtrAddr(handle),
CHAR(asChar(field)),
BeginPtr(vec), len));
CHECK_CALL(XGDMatrixSetGroup(R_ExternalPtrAddr(handle), BeginPtr(vec), len));
} else {
std::vector<float> vec(len);
#pragma omp parallel for schedule(static)
@@ -303,8 +300,7 @@ SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames) {
return mkString(ret);
}
SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
SEXP ntree_limit, SEXP training) {
SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask, SEXP ntree_limit) {
SEXP ret;
R_API_BEGIN();
bst_ulong olen;
@@ -313,7 +309,6 @@ SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
R_ExternalPtrAddr(dmat),
asInteger(option_mask),
asInteger(ntree_limit),
asInteger(training),
&olen, &res));
ret = PROTECT(allocVector(REALSXP, olen));
for (size_t i = 0; i < olen; ++i) {


@@ -1,6 +1,6 @@
/*!
* Copyright 2014 (c) by Contributors
* \file xgboost_R.h
* \file xgboost_wrapper_R.h
* \author Tianqi Chen
* \brief R wrapper of xgboost
*/
@@ -148,10 +148,8 @@ XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evn
* \param dmat data matrix
* \param option_mask output_margin:1 predict_leaf:2
* \param ntree_limit limit number of trees used in prediction
* \param training Whether the prediction value is used for training.
*/
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
SEXP ntree_limit, SEXP training);
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask, SEXP ntree_limit);
/*!
* \brief load model from existing file
* \param handle handle


@@ -3,7 +3,7 @@
// to change behavior of libxgboost
#include <xgboost/logging.h>
#include "../../src/common/random.h"
#include "src/common/random.h"
#include "./xgboost_R.h"
// redirect the messages to R's console.
@@ -32,11 +32,8 @@ extern "C" {
namespace xgboost {
ConsoleLogger::~ConsoleLogger() {
if (cur_verbosity_ == LogVerbosity::kIgnore ||
cur_verbosity_ <= global_verbosity_) {
dmlc::CustomLogMessage::Log(log_stream_.str());
}
}
TrackerLogger::~TrackerLogger() {
dmlc::CustomLogMessage::Log(log_stream_.str());
}
@@ -49,11 +46,10 @@ namespace common {
bool CheckNAN(double v) {
return ISNAN(v);
}
#if !defined(XGBOOST_USE_CUDA)
double LogGamma(double v) {
return lgammafn(v);
}
#endif // !defined(XGBOOST_USE_CUDA)
// customize random engine.
void CustomGlobalRandomEngine::seed(CustomGlobalRandomEngine::result_type val) {
// ignore the seed


@@ -35,54 +35,6 @@ test_that("train and predict binary classification", {
expect_lt(abs(err_pred1 - err_log), 10e-6)
})
test_that("dart prediction works", {
nrounds = 32
set.seed(1994)
d <- cbind(
x1 = rnorm(100),
x2 = rnorm(100),
x3 = rnorm(100))
y <- d[,"x1"] + d[,"x2"]^2 +
ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
rnorm(100)
set.seed(1994)
booster_by_xgboost <- xgboost(data = d, label = y, max_depth = 2, booster = "dart",
rate_drop = 0.5, one_drop = TRUE,
eta = 1, nthread = 2, nrounds = nrounds, objective = "reg:squarederror")
pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0)
pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds)
expect_true(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE)))
pred_by_xgboost_2 <- predict(booster_by_xgboost, newdata = d, training = TRUE)
expect_false(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE)))
set.seed(1994)
dtrain <- xgb.DMatrix(data=d, info = list(label=y))
booster_by_train <- xgb.train( params = list(
booster = "dart",
max_depth = 2,
eta = 1,
rate_drop = 0.5,
one_drop = TRUE,
nthread = 1,
tree_method= "exact",
verbosity = 3,
objective = "reg:squarederror"
),
data = dtrain,
nrounds = nrounds
)
pred_by_train_0 <- predict(booster_by_train, newdata = dtrain, ntreelimit = 0)
pred_by_train_1 <- predict(booster_by_train, newdata = dtrain, ntreelimit = nrounds)
pred_by_train_2 <- predict(booster_by_train, newdata = dtrain, training = TRUE)
expect_true(all(matrix(pred_by_train_0, byrow=TRUE) == matrix(pred_by_xgboost_0, byrow=TRUE)))
expect_true(all(matrix(pred_by_train_1, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE)))
expect_true(all(matrix(pred_by_train_2, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE)))
})
test_that("train and predict softprob", {
lb <- as.numeric(iris$Species) - 1
set.seed(11)
@@ -230,7 +182,7 @@ test_that("xgb.cv works", {
expect_is(cv, 'xgb.cv.synchronous')
expect_false(is.null(cv$evaluation_log))
expect_lt(cv$evaluation_log[, min(test_error_mean)], 0.03)
expect_lt(cv$evaluation_log[, min(test_error_std)], 0.008)
expect_lt(cv$evaluation_log[, min(test_error_std)], 0.004)
expect_equal(cv$niter, 2)
expect_false(is.null(cv$folds) && is.list(cv$folds))
expect_length(cv$folds, 5)
@@ -239,20 +191,6 @@ test_that("xgb.cv works", {
expect_false(is.null(cv$call))
})
test_that("xgb.cv works with stratified folds", {
dtrain <- xgb.DMatrix(train$data, label = train$label)
set.seed(314159)
cv <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5,
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
verbose=TRUE, stratified = FALSE)
set.seed(314159)
cv2 <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5,
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
verbose=TRUE, stratified = TRUE)
# Stratified folds should result in a different evaluation logs
expect_true(all(cv$evaluation_log[, test_error_mean] != cv2$evaluation_log[, test_error_mean]))
})
test_that("train and predict with non-strict classes", {
# standard dense matrix input
train_dense <- as.matrix(train$data)


@@ -236,7 +236,7 @@ test_that("early stopping using a specific metric works", {
expect_equal(length(pred), 1611)
logloss_pred <- sum(-ltest * log(pred) - (1 - ltest) * log(1 - pred)) / length(ltest)
logloss_log <- bst$evaluation_log[bst$best_iteration, test_logloss]
expect_equal(logloss_log, logloss_pred, tolerance = 1e-5)
expect_equal(logloss_log, logloss_pred, tolerance = 5e-6)
})
test_that("early stopping xgb.cv works", {
@@ -282,11 +282,10 @@ test_that("prediction in xgb.cv works for gblinear too", {
})
test_that("prediction in early-stopping xgb.cv works", {
set.seed(11)
set.seed(1)
expect_output(
cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.1, nrounds = 20,
early_stopping_rounds = 5, maximize = FALSE, stratified = FALSE,
prediction = TRUE)
early_stopping_rounds = 5, maximize = FALSE, prediction = TRUE)
, "Stopping. Best iteration")
expect_false(is.null(cv$best_iteration))


@@ -31,6 +31,7 @@ num_round <- 2
test_that("custom objective works", {
bst <- xgb.train(param, dtrain, num_round, watchlist)
expect_equal(class(bst), "xgb.Booster")
expect_equal(length(bst$raw), 1094)
expect_false(is.null(bst$evaluation_log))
expect_false(is.null(bst$evaluation_log$eval_error))
expect_lt(bst$evaluation_log[num_round, eval_error], 0.03)
@@ -57,4 +58,5 @@ test_that("custom objective using DMatrix attr works", {
param$objective = logregobjattr
bst <- xgb.train(param, dtrain, num_round, watchlist)
expect_equal(class(bst), "xgb.Booster")
expect_equal(length(bst$raw), 1094)
})


@@ -7,8 +7,8 @@ require(vcd, quietly = TRUE)
float_tolerance = 5e-6
# disable some tests for 32-bit environment
flag_32bit = .Machine$sizeof.pointer != 8
# disable some tests for Win32
win32_flag = .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8
set.seed(1982)
data(Arthritis)
@@ -44,7 +44,7 @@ mbst.GLM <- xgboost(data = as.matrix(iris[, -5]), label = mlabel, verbose = 0,
test_that("xgb.dump works", {
if (!flag_32bit)
if (!win32_flag)
expect_length(xgb.dump(bst.Tree), 200)
dump_file = file.path(tempdir(), 'xgb.model.dump')
expect_true(xgb.dump(bst.Tree, dump_file, with_stats = T))
@@ -54,7 +54,7 @@ test_that("xgb.dump works", {
# JSON format
dmp <- xgb.dump(bst.Tree, dump_format = "json")
expect_length(dmp, 1)
if (!flag_32bit)
if (!win32_flag)
expect_length(grep('nodeid', strsplit(dmp, '\n')[[1]]), 188)
})
@@ -142,44 +142,6 @@ test_that("predict feature contributions works", {
}
})
test_that("SHAPs sum to predictions, with or without DART", {
d <- cbind(
x1 = rnorm(100),
x2 = rnorm(100),
x3 = rnorm(100))
y <- d[,"x1"] + d[,"x2"]^2 +
ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
rnorm(100)
nrounds <- 30
for (booster in list("gbtree", "dart")) {
fit <- xgboost(
params = c(
list(
booster = booster,
objective = "reg:squarederror",
eval_metric = "rmse"),
if (booster == "dart")
list(rate_drop = .01, one_drop = T)),
data = d,
label = y,
nrounds = nrounds)
pr <- function(...)
predict(fit, newdata = d, ...)
pred <- pr()
shap <- pr(predcontrib = T)
shapi <- pr(predinteraction = T)
tol = 1e-5
expect_equal(rowSums(shap), pred, tol = tol)
expect_equal(apply(shapi, 1, sum), pred, tol = tol)
for (i in 1 : nrow(d))
for (f in list(rowSums, colSums))
expect_equal(f(shapi[i,,]), shap[i,], tol = tol)
}
})
test_that("xgb-attribute functionality", {
val <- "my attribute value"
list.val <- list(my_attr=val, a=123, b='ok')
@@ -201,7 +163,6 @@ test_that("xgb-attribute functionality", {
# serializing:
xgb.save(bst.Tree, 'xgb.model')
bst <- xgb.load('xgb.model')
if (file.exists('xgb.model')) file.remove('xgb.model')
expect_equal(xgb.attr(bst, "my_attr"), val)
expect_equal(xgb.attributes(bst), list.ch)
# deletion:
@@ -238,12 +199,10 @@ if (grepl('Windows', Sys.info()[['sysname']]) ||
test_that("xgb.Booster serializing as R object works", {
saveRDS(bst.Tree, 'xgb.model.rds')
bst <- readRDS('xgb.model.rds')
if (file.exists('xgb.model.rds')) file.remove('xgb.model.rds')
dtrain <- xgb.DMatrix(sparse_matrix, label = label)
expect_equal(predict(bst.Tree, dtrain), predict(bst, dtrain), tolerance = float_tolerance)
expect_equal(xgb.dump(bst.Tree), xgb.dump(bst))
xgb.save(bst, 'xgb.model')
if (file.exists('xgb.model')) file.remove('xgb.model')
nil_ptr <- new("externalptr")
class(nil_ptr) <- "xgb.Booster.handle"
expect_true(identical(bst$handle, nil_ptr))
@@ -256,7 +215,7 @@ test_that("xgb.model.dt.tree works with and without feature names", {
names.dt.trees <- c("Tree", "Node", "ID", "Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
dt.tree <- xgb.model.dt.tree(feature_names = feature.names, model = bst.Tree)
expect_equal(names.dt.trees, names(dt.tree))
if (!flag_32bit)
if (!win32_flag)
expect_equal(dim(dt.tree), c(188, 10))
expect_output(str(dt.tree), 'Feature.*\\"Age\\"')
@@ -283,7 +242,7 @@ test_that("xgb.model.dt.tree throws error for gblinear", {
test_that("xgb.importance works with and without feature names", {
importance.Tree <- xgb.importance(feature_names = feature.names, model = bst.Tree)
if (!flag_32bit)
if (!win32_flag)
expect_equal(dim(importance.Tree), c(7, 4))
expect_equal(colnames(importance.Tree), c("Feature", "Gain", "Cover", "Frequency"))
expect_output(str(importance.Tree), 'Feature.*\\"Age\\"')


@@ -81,39 +81,6 @@ test_that("predict feature interactions works", {
expect_lt(max(abs(intr - gt_intr)), 0.1)
})
test_that("SHAP contribution values are not NAN", {
d <- data.frame(
x1 = c(-2.3, 1.4, 5.9, 2, 2.5, 0.3, -3.6, -0.2, 0.5, -2.8, -4.6, 3.3, -1.2,
-1.1, -2.3, 0.4, -1.5, -0.2, -1, 3.7),
x2 = c(291.179171, 269.198331, 289.942097, 283.191669, 269.673332,
294.158346, 287.255835, 291.530838, 285.899586, 269.290833,
268.649586, 291.530841, 280.074593, 269.484168, 293.94042,
294.327506, 296.20709, 295.441669, 283.16792, 270.227085),
y = c(9, 15, 5.7, 9.2, 22.4, 5, 9, 3.2, 7.2, 13.1, 7.8, 16.9, 6.5, 22.1,
5.3, 10.4, 11.1, 13.9, 11, 20.5),
fold = c(2, 2, 2, 1, 2, 2, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2))
ivs <- c("x1", "x2")
fit <- xgboost(
verbose = 0,
params = list(
objective = "reg:squarederror",
eval_metric = "rmse"),
data = as.matrix(subset(d, fold == 2)[, ivs]),
label = subset(d, fold == 2)$y,
nthread = 1,
nrounds = 3)
shaps <- as.data.frame(predict(fit,
newdata = as.matrix(subset(d, fold == 1)[, ivs]),
predcontrib = T))
result <- cbind(shaps, sum = rowSums(shaps), pred = predict(fit,
newdata = as.matrix(subset(d, fold == 1)[, ivs])))
expect_true(identical(TRUE, all.equal(result$sum, result$pred, tol = 1e-6)))
})
test_that("multiclass feature interactions work", {
dm <- xgb.DMatrix(as.matrix(iris[,-5]), label=as.numeric(iris$Species)-1)


@@ -138,7 +138,7 @@ levels(df[,Treatment])
Next step, we will transform the categorical data to dummy variables.
Several encoding methods exist, e.g., [one-hot encoding](http://en.wikipedia.org/wiki/One-hot) is a common approach.
We will use the [dummy contrast coding](http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm#dummy) which is popular because it produces "full rank" encoding (also see [this blog post by Max Kuhn](http://appliedpredictivemodeling.com/blog/2013/10/23/the-basics-of-encoding-categorical-data-for-predictive-models)).
We will use the [dummy contrast coding](http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm#dummy) which is popular because it producess "full rank" encoding (also see [this blog post by Max Kuhn](http://appliedpredictivemodeling.com/blog/2013/10/23/the-basics-of-encoding-categorical-data-for-predictive-models)).
The purpose is to transform each value of each *categorical* feature into a *binary* feature `{0, 1}`.
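
As a pointer for the encoding step discussed just above, a minimal R sketch; the `Arthritis` data set from the `vcd` package and the `Improved` outcome column are assumed from that vignette and are not shown in this hunk:

```r
library(Matrix)
data(Arthritis, package = "vcd")
df <- Arthritis
# dummy-encode every categorical column, keeping the outcome out of the design matrix
sparse_matrix <- sparse.model.matrix(Improved ~ . - 1, data = df)
head(sparse_matrix)
```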
@@ -268,7 +268,7 @@ c2 <- chisq.test(df$Age, output_vector)
print(c2)
```
Pearson correlation between Age and illness disappearing is **`r round(c2$statistic, 2 )`**.
Pearson correlation between Age and illness disapearing is **`r round(c2$statistic, 2 )`**.
```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$AgeDiscret, output_vector)


@@ -313,7 +313,7 @@ Until now, all the learnings we have performed were based on boosting trees. **X
bst <- xgb.train(data=dtrain, booster = "gblinear", max_depth=2, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
```
In this specific case, *linear boosting* gets slightly better performance metrics than decision trees based algorithm.
In this specific case, *linear boosting* gets sligtly better performance metrics than decision trees based algorithm.
In simple cases, it will happen because there is nothing better than a linear algorithm to catch a linear link. However, decision trees are much better to catch a non linear link between predictors and outcome. Because there is no silver bullet, we advise you to check both algorithms with your own datasets to have an idea of what to use.
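
For readers following that vignette, a minimal sketch of the tree-based counterpart for comparison; `dtrain` and `watchlist` are assumed to be the DMatrix and watchlist built earlier in the vignette, not shown in this hunk:

```r
# same setup as the gblinear call above, but with the default tree booster
bst_tree <- xgb.train(data = dtrain, booster = "gbtree", max_depth = 2, nthread = 2,
                      nrounds = 2, watchlist = watchlist,
                      eval_metric = "error", eval_metric = "logloss",
                      objective = "binary:logistic")
```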


@@ -1,189 +0,0 @@
---
title: "XGBoost from JSON"
output:
rmarkdown::html_vignette:
number_sections: yes
toc: yes
author: Roland Stevenson
vignette: >
%\VignetteIndexEntry{XGBoost from JSON}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
XGBoost from JSON
=================
## Introduction
The purpose of this Vignette is to show you how to correctly load and work with an **Xgboost** model that has been dumped to JSON. **Xgboost** internally converts all data to [32-bit floats](https://en.wikipedia.org/wiki/Single-precision_floating-point_format), and the values dumped to JSON are decimal representations of these values. When working with a model that has been parsed from a JSON file, care must be taken to correctly treat:
- the input data, which should be converted to 32-bit floats
- any 32-bit floats that were stored in JSON as decimal representations
- any calculations must be done with 32-bit mathematical operators
## Setup
For the purpose of this tutorial we will load the xgboost, jsonlite, and float packages. We'll also set `digits=22` in our options in case we want to inspect many digits of our results.
```{r}
require(xgboost)
require(jsonlite)
require(float)
options(digits=22)
```
We will create a toy binary logistic model based on the example first provided [here](https://github.com/dmlc/xgboost/issues/3960), so that we can easily understand the structure of the dumped JSON model object. This will allow us to understand where discrepancies can occur and how they should be handled.
```{r}
dates <- c(20180130, 20180130, 20180130,
20180130, 20180130, 20180130,
20180131, 20180131, 20180131,
20180131, 20180131, 20180131,
20180131, 20180131, 20180131,
20180134, 20180134, 20180134)
labels <- c(1, 1, 1,
1, 1, 1,
0, 0, 0,
0, 0, 0,
0, 0, 0,
0, 0, 0)
data <- data.frame(dates = dates, labels=labels)
bst <- xgboost(
data = as.matrix(data$dates),
label = labels,
nthread = 2,
nrounds = 1,
objective = "binary:logistic",
missing = NA,
max_depth = 1
)
```
## Comparing results
We will now dump the model to JSON and attempt to illustrate a variety of issues that can arise, and how to properly deal with them.
First let's dump the model to JSON:
```{r}
bst_json <- xgb.dump(bst, with_stats = FALSE, dump_format='json')
bst_from_json <- fromJSON(bst_json, simplifyDataFrame = FALSE)
node <- bst_from_json[[1]]
cat(bst_json)
```
The tree JSON shown by the above code-chunk tells us that if the data is less than 20180132, the tree will output the value in the first leaf. Otherwise it will output the value in the second leaf. Let's try to reproduce this manually with the data we have and confirm that it matches the model predictions we've already calculated.
```{r}
bst_preds_logodds <- predict(bst,as.matrix(data$dates), outputmargin = TRUE)
# calculate the logodds values using the JSON representation
bst_from_json_logodds <- ifelse(data$dates<node$split_condition,
node$children[[1]]$leaf,
node$children[[2]]$leaf)
bst_preds_logodds
bst_from_json_logodds
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
```
None are equal. What happened?
At this stage two things happened:
- input data was not converted to 32-bit floats
- the JSON variables were not converted to 32-bit floats
### Lesson 1: All data is 32-bit floats
> When working with imported JSON, all data must be converted to 32-bit floats
To explain this, let's repeat the comparison and round to two decimals:
```{r}
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
```
If we round to two decimals, we see that only the elements related to data values of `20180131` don't agree. If we convert the data to floats, they agree:
```{r}
# now convert the dates to floats first
bst_from_json_logodds <- ifelse(fl(data$dates)<node$split_condition,
node$children[[1]]$leaf,
node$children[[2]]$leaf)
# test that values are equal
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
```
What's the lesson? If we are going to work with an imported JSON model, any data must be converted to floats first. In this case, since '20180131' cannot be represented as a 32-bit float, it is rounded up to 20180132, as shown here:
```{r}
fl(20180131)
```
### Lesson 2: JSON parameters are 32-bit floats
> All JSON parameters stored as floats must be converted to floats.
Let's now say we do care about numbers past the first two decimals.
```{r}
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
```
None are exactly equal. What happened? Although we've converted the data to 32-bit floats, we also need to convert the JSON parameters to 32-bit floats. Let's do this:
```{r}
# now convert the dates to floats first
bst_from_json_logodds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(fl(node$children[[1]]$leaf)),
as.numeric(fl(node$children[[2]]$leaf)))
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
```
All equal. What's the lesson? If we are going to work with an imported JSON model, any JSON parameters that were stored as floats must also be converted to floats first.
### Lesson 3: Use 32-bit math
> Always use 32-bit numbers and operators
We were able to get the log-odds to agree, so now let's manually calculate the sigmoid of the log-odds. This should agree with the xgboost predictions.
```{r}
bst_preds <- predict(bst,as.matrix(data$dates))
# calculate the predictions casting doubles to floats
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(1/(1+exp(-1*fl(node$children[[1]]$leaf)))),
as.numeric(1/(1+exp(-1*fl(node$children[[2]]$leaf))))
)
# test that values are equal
bst_preds == bst_from_json_preds
```
None are exactly equal again. What is going on here? Well, since we are using the value `1` in the calcuations, we have introduced a double into the calculation. Because of this, all float values are promoted to 64-bit doubles and the 64-bit version of the exponentiation operator `exp` is also used. On the other hand, xgboost uses the 32-bit version of the exponentation operator in its [sigmoid function](https://github.com/dmlc/xgboost/blob/54980b8959680a0da06a3fc0ec776e47c8cbb0a1/src/common/math.h#L25-L27).
How do we fix this? We have to ensure we use the correct datatypes everywhere and the correct operators. If we use only floats, the float library that we have loaded will ensure the 32-bit float exponention operator is applied.
```{r}
# calculate the predictions casting doubles to floats
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[1]]$leaf)))),
as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[2]]$leaf))))
)
# test that values are equal
bst_preds == bst_from_json_preds
```
All equal. What's the lesson? We have to ensure that all calculations are done with 32-bit floating point operators if we want to reproduce the results that we see with xgboost.


@@ -1,13 +1,11 @@
<img src=https://raw.githubusercontent.com/dmlc/dmlc.github.io/master/img/logo-m/xgboost.png width=135/> eXtreme Gradient Boosting
===========
[![Build Status](https://xgboost-ci.net/job/xgboost/job/master/badge/icon?style=plastic)](https://xgboost-ci.net/blue/organizations/jenkins/xgboost/activity)
[![Build Status](https://img.shields.io/travis/dmlc/xgboost.svg?label=build&logo=travis&branch=master)](https://travis-ci.org/dmlc/xgboost)
[![Build Status](https://travis-ci.org/dmlc/xgboost.svg?branch=master)](https://travis-ci.org/dmlc/xgboost)
[![Build Status](https://ci.appveyor.com/api/projects/status/5ypa8vaed6kpmli8?svg=true)](https://ci.appveyor.com/project/tqchen/xgboost)
[![Documentation Status](https://readthedocs.org/projects/xgboost/badge/?version=latest)](https://xgboost.readthedocs.org)
[![GitHub license](http://dmlc.github.io/img/apache2.svg)](./LICENSE)
[![CRAN Status Badge](http://www.r-pkg.org/badges/version/xgboost)](http://cran.r-project.org/web/packages/xgboost)
[![PyPI version](https://badge.fury.io/py/xgboost.svg)](https://pypi.python.org/pypi/xgboost/)
[![Optuna](https://img.shields.io/badge/Optuna-integrated-blue)](https://optuna.org)
[Community](https://xgboost.ai/community) |
[Documentation](https://xgboost.readthedocs.org) |
@@ -18,11 +16,11 @@
XGBoost is an optimized distributed gradient boosting library designed to be highly ***efficient***, ***flexible*** and ***portable***.
It implements machine learning algorithms under the [Gradient Boosting](https://en.wikipedia.org/wiki/Gradient_boosting) framework.
XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solve many data science problems in a fast and accurate way.
The same code runs on major distributed environment (Kubernetes, Hadoop, SGE, MPI, Dask) and can solve problems beyond billions of examples.
The same code runs on major distributed environment (Hadoop, SGE, MPI) and can solve problems beyond billions of examples.
License
-------
© Contributors, 2019. Licensed under an [Apache-2](https://github.com/dmlc/xgboost/blob/master/LICENSE) license.
© Contributors, 2016. Licensed under an [Apache-2](https://github.com/dmlc/xgboost/blob/master/LICENSE) license.
Contribute to XGBoost
---------------------
@@ -33,35 +31,3 @@ Reference
---------
- Tianqi Chen and Carlos Guestrin. [XGBoost: A Scalable Tree Boosting System](http://arxiv.org/abs/1603.02754). In 22nd SIGKDD Conference on Knowledge Discovery and Data Mining, 2016
- XGBoost originates from research project at University of Washington.
Sponsors
--------
Become a sponsor and get a logo here. See details at [Sponsoring the XGBoost Project](https://xgboost.ai/sponsors). The funds are used to defray the cost of continuous integration and testing infrastructure (https://xgboost-ci.net).
## Open Source Collective sponsors
[![Backers on Open Collective](https://opencollective.com/xgboost/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/xgboost/sponsors/badge.svg)](#sponsors)
### Sponsors
[[Become a sponsor](https://opencollective.com/xgboost#sponsor)]
<!--<a href="https://opencollective.com/xgboost/sponsor/0/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/0/avatar.svg"></a>-->
<a href="https://www.nvidia.com/en-us/" target="_blank"><img src="https://raw.githubusercontent.com/xgboost-ai/xgboost-ai.github.io/master/images/sponsors/nvidia.jpg" alt="NVIDIA" width="72" height="72"></a>
<a href="https://opencollective.com/xgboost/sponsor/1/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/2/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/3/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/4/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/5/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/6/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/7/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/8/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/9/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/9/avatar.svg"></a>
### Backers
[[Become a backer](https://opencollective.com/xgboost#backer)]
<a href="https://opencollective.com/xgboost#backers" target="_blank"><img src="https://opencollective.com/xgboost/backers.svg?width=890"></a>
## Other sponsors
The sponsors in this list are donating cloud hours in lieu of cash donation.
<a href="https://aws.amazon.com/" target="_blank"><img src="https://raw.githubusercontent.com/xgboost-ai/xgboost-ai.github.io/master/images/sponsors/aws.png" alt="Amazon Web Services" width="72" height="72"></a>


@@ -1,5 +1,5 @@
/*!
* Copyright 2015-2019 by Contributors.
* Copyright 2015 by Contributors.
* \brief XGBoost Amalgamation.
* This offers an alternative way to compile the entire library from this single file.
*
@@ -25,39 +25,35 @@
// gbms
#include "../src/gbm/gbm.cc"
#include "../src/gbm/gbtree.cc"
#include "../src/gbm/gbtree_model.cc"
#include "../src/gbm/gblinear.cc"
#include "../src/gbm/gblinear_model.cc"
// data
#include "../src/data/data.cc"
#include "../src/data/simple_csr_source.cc"
#include "../src/data/simple_dmatrix.cc"
#include "../src/data/sparse_page_raw_format.cc"
#include "../src/data/ellpack_page.cc"
#include "../src/data/ellpack_page_source.cc"
// prediction
#include "../src/predictor/predictor.cc"
#include "../src/predictor/cpu_predictor.cc"
#if DMLC_ENABLE_STD_THREAD
#include "../src/data/sparse_page_source.cc"
#include "../src/data/sparse_page_dmatrix.cc"
#include "../src/data/sparse_page_writer.cc"
#endif
// tress
#include "../src/tree/param.cc"
#include "../src/tree/split_evaluator.cc"
#include "../src/tree/tree_model.cc"
#include "../src/tree/tree_updater.cc"
#include "../src/tree/updater_colmaker.cc"
#include "../src/tree/updater_quantile_hist.cc"
#include "../src/tree/updater_fast_hist.cc"
#include "../src/tree/updater_prune.cc"
#include "../src/tree/updater_refresh.cc"
#include "../src/tree/updater_sync.cc"
#include "../src/tree/updater_histmaker.cc"
#include "../src/tree/updater_skmaker.cc"
#include "../src/tree/constraints.cc"
// linear
#include "../src/linear/linear_updater.cc"
@@ -68,12 +64,8 @@
#include "../src/learner.cc"
#include "../src/logging.cc"
#include "../src/common/common.cc"
#include "../src/common/timer.cc"
#include "../src/common/host_device_vector.cc"
#include "../src/common/hist_util.cc"
#include "../src/common/json.cc"
#include "../src/common/io.cc"
#include "../src/common/version.cc"
// c_api
#include "../src/c_api/c_api.cc"


@@ -2,6 +2,10 @@ environment:
R_ARCH: x64
USE_RTOOLS: true
matrix:
- target: msvc
ver: 2013
generator: "Visual Studio 12 2013 Win64"
configuration: Release
- target: msvc
ver: 2015
generator: "Visual Studio 14 2015 Win64"
@@ -32,32 +36,26 @@ install:
- set PATH=C:\msys64\mingw64\bin;C:\msys64\usr\bin;%PATH%
- gcc -v
- ls -l C:\
# Miniconda3
- call C:\Miniconda3-x64\Scripts\activate.bat
- conda info
# Miniconda2
- set PATH=;C:\Miniconda-x64;C:\Miniconda-x64\Scripts;%PATH%
- where python
- python --version
# do python build for mingw and one of the msvc jobs
- set DO_PYTHON=off
- if /i "%target%" == "mingw" set DO_PYTHON=on
- if /i "%target%_%ver%_%configuration%" == "msvc_2015_Release" set DO_PYTHON=on
- if /i "%DO_PYTHON%" == "on" (
conda config --set always_yes true &&
conda update -q conda &&
conda install -y numpy scipy pandas matplotlib pytest scikit-learn graphviz python-graphviz
)
- set PATH=C:\Miniconda3-x64\Library\bin\graphviz;%PATH%
- if /i "%DO_PYTHON%" == "on" conda install -y numpy scipy pandas matplotlib nose scikit-learn graphviz python-graphviz
# R: based on https://github.com/krlmlr/r-appveyor
- ps: |
if($env:target -eq 'rmingw' -or $env:target -eq 'rmsvc') {
#$ErrorActionPreference = "Stop"
Invoke-WebRequest https://raw.githubusercontent.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "$Env:TEMP\appveyor-tool.ps1"
Invoke-WebRequest http://raw.github.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "$Env:TEMP\appveyor-tool.ps1"
Import-Module "$Env:TEMP\appveyor-tool.ps1"
Bootstrap
$BINARY_DEPS = "c('XML','igraph')"
cmd.exe /c "R.exe -q -e ""install.packages($BINARY_DEPS, repos='$CRAN', type='win.binary')"" 2>&1"
$DEPS = "c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','knitr','rmarkdown')"
cmd.exe /c "R.exe -q -e ""install.packages($DEPS, repos='$CRAN', type='both')"" 2>&1"
$BINARY_DEPS = "c('XML','igraph')"
cmd.exe /c "R.exe -q -e ""install.packages($BINARY_DEPS, repos='$CRAN', type='win.binary')"" 2>&1"
}
build_script:
@@ -94,15 +92,14 @@ build_script:
cmake .. -G"%generator%" -DCMAKE_CONFIGURATION_TYPES="Release" -DR_LIB=ON &&
cmake --build . --target install --config Release
)
- if /i "%target%" == "jvm" cd jvm-packages && mvn test -pl :xgboost4j_2.12
- if /i "%target%" == "jvm" cd jvm-packages && mvn test -pl :xgboost4j
test_script:
- cd %APPVEYOR_BUILD_FOLDER%
- if /i "%DO_PYTHON%" == "on" python -m pytest tests/python
- if /i "%DO_PYTHON%" == "on" python -m nose tests/python
# mingw R package: run the R check (which includes unit tests), and also keep the built binary package
- if /i "%target%" == "rmingw" (
set _R_CHECK_CRAN_INCOMING_=FALSE&&
set _R_CHECK_FORCE_SUGGESTS_=FALSE&&
R.exe CMD check xgboost*.tar.gz --no-manual --no-build-vignettes --as-cran --install-args=--build
)
# MSVC R package: run only the unit tests

build.sh (new executable file, 51 lines)

@@ -0,0 +1,51 @@
#!/bin/bash
# This is a simple script to make xgboost in MAC and Linux
# Basically, it first try to make with OpenMP, if fails, disable OpenMP and make it again.
# This will automatically make xgboost for MAC users who don't have OpenMP support.
# In most cases, type make will give what you want.
# See additional instruction in doc/build.md
set -e
if make; then
echo "Successfully build multi-thread xgboost"
else
not_ready=0
if [[ ! -e ./rabit/Makefile ]]; then
echo ""
echo "Please init the rabit submodule:"
echo "git submodule update --init --recursive -- rabit"
not_ready=1
fi
if [[ ! -e ./dmlc-core/Makefile ]]; then
echo ""
echo "Please init the dmlc-core submodule:"
echo "git submodule update --init --recursive -- dmlc-core"
not_ready=1
fi
if [[ "${not_ready}" == "1" ]]; then
echo ""
echo "Please fix the errors above and retry the build, or reclone the repository with:"
echo "git clone --recursive https://github.com/dmlc/xgboost.git"
echo ""
exit 1
fi
echo "-----------------------------"
echo "Building multi-thread xgboost failed"
echo "Start to build single-thread xgboost"
make clean_all
make config=make/minimum.mk
if [ $? -eq 0 ] ;then
echo "Successfully build single-thread xgboost"
echo "If you want multi-threaded version"
echo "See additional instructions in doc/build.md"
else
echo "Failed to build single-thread xgboost"
fi
fi


@@ -1,16 +0,0 @@
function (run_doxygen)
find_package(Doxygen REQUIRED)
if (NOT DOXYGEN_DOT_FOUND)
message(FATAL_ERROR "Command `dot` not found. Please install graphviz.")
endif (NOT DOXYGEN_DOT_FOUND)
configure_file(
${xgboost_SOURCE_DIR}/doc/Doxyfile.in
${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY)
add_custom_target( doc_doxygen ALL
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generate C APIs documentation."
VERBATIM)
endfunction (run_doxygen)


@@ -1,22 +0,0 @@
function (find_prefetch_intrinsics)
include(CheckCXXSourceCompiles)
check_cxx_source_compiles("
#include <xmmintrin.h>
int main() {
char data = 0;
const char* address = &data;
_mm_prefetch(address, _MM_HINT_NTA);
return 0;
}
" XGBOOST_MM_PREFETCH_PRESENT)
check_cxx_source_compiles("
int main() {
char data = 0;
const char* address = &data;
__builtin_prefetch(address, 0, 0);
return 0;
}
" XGBOOST_BUILTIN_PREFETCH_PRESENT)
set(XGBOOST_MM_PREFETCH_PRESENT ${XGBOOST_MM_PREFETCH_PRESENT} PARENT_SCOPE)
set(XGBOOST_BUILTIN_PREFETCH_PRESENT ${XGBOOST_BUILTIN_PREFETCH_PRESENT} PARENT_SCOPE)
endfunction (find_prefetch_intrinsics)


@@ -1 +0,0 @@
@xgboost_VERSION_MAJOR@.@xgboost_VERSION_MINOR@.@xgboost_VERSION_PATCH@


@@ -4,29 +4,24 @@
# enable_sanitizers("address;leak")
# Add flags
macro(enable_sanitizer sanitizer)
if(${sanitizer} MATCHES "address")
macro(enable_sanitizer santizer)
if(${santizer} MATCHES "address")
find_package(ASan REQUIRED)
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=address")
link_libraries(${ASan_LIBRARY})
elseif(${sanitizer} MATCHES "thread")
elseif(${santizer} MATCHES "thread")
find_package(TSan REQUIRED)
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=thread")
link_libraries(${TSan_LIBRARY})
elseif(${sanitizer} MATCHES "leak")
elseif(${santizer} MATCHES "leak")
find_package(LSan REQUIRED)
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=leak")
link_libraries(${LSan_LIBRARY})
elseif(${sanitizer} MATCHES "undefined")
find_package(UBSan REQUIRED)
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=undefined -fno-sanitize-recover=undefined")
link_libraries(${UBSan_LIBRARY})
else()
message(FATAL_ERROR "Santizer ${sanitizer} not supported.")
message(FATAL_ERROR "Santizer ${santizer} not supported.")
endif()
endmacro()


@@ -1,3 +1,4 @@
# Automatically set source group based on folder
function(auto_source_group SOURCES)
@@ -17,10 +18,6 @@ endfunction(auto_source_group)
function(msvc_use_static_runtime)
if(MSVC)
set(variables
CMAKE_C_FLAGS_DEBUG
CMAKE_C_FLAGS_MINSIZEREL
CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS_DEBUG
CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_CXX_FLAGS_RELEASE
@@ -32,23 +29,6 @@ function(msvc_use_static_runtime)
set(${variable} "${${variable}}" PARENT_SCOPE)
endif()
endforeach()
set(variables
CMAKE_CUDA_FLAGS
CMAKE_CUDA_FLAGS_DEBUG
CMAKE_CUDA_FLAGS_MINSIZEREL
CMAKE_CUDA_FLAGS_RELEASE
CMAKE_CUDA_FLAGS_RELWITHDEBINFO
)
foreach(variable ${variables})
if(${variable} MATCHES "-MD")
string(REGEX REPLACE "-MD" "-MT" ${variable} "${${variable}}")
set(${variable} "${${variable}}" PARENT_SCOPE)
endif()
if(${variable} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${variable} "${${variable}}")
set(${variable} "${${variable}}" PARENT_SCOPE)
endif()
endforeach()
endif()
endfunction(msvc_use_static_runtime)
@@ -58,13 +38,9 @@ function(set_output_directory target dir)
RUNTIME_OUTPUT_DIRECTORY ${dir}
RUNTIME_OUTPUT_DIRECTORY_DEBUG ${dir}
RUNTIME_OUTPUT_DIRECTORY_RELEASE ${dir}
RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO ${dir}
RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${dir}
LIBRARY_OUTPUT_DIRECTORY ${dir}
LIBRARY_OUTPUT_DIRECTORY_DEBUG ${dir}
LIBRARY_OUTPUT_DIRECTORY_RELEASE ${dir}
LIBRARY_OUTPUT_DIRECTORY_RELWITHDEBINFO ${dir}
LIBRARY_OUTPUT_DIRECTORY_MINSIZEREL ${dir}
)
endfunction(set_output_directory)
@@ -81,14 +57,9 @@ endfunction(set_default_configuration_release)
# Generate nvcc compiler flags given a list of architectures
# Also generates PTX for the most recent architecture for forwards compatibility
function(format_gencode_flags flags out)
if(CMAKE_CUDA_COMPILER_VERSION MATCHES "^([0-9]+\\.[0-9]+)")
set(CUDA_VERSION "${CMAKE_MATCH_1}")
endif()
# Set up architecture flags
if(NOT flags)
if(CUDA_VERSION VERSION_GREATER_EQUAL "10.0")
set(flags "35;50;52;60;61;70;75")
elseif(CUDA_VERSION VERSION_GREATER_EQUAL "9.0")
if((CUDA_VERSION_MAJOR EQUAL 9) OR (CUDA_VERSION_MAJOR GREATER 9))
set(flags "35;50;52;60;61;70")
else()
set(flags "35;50;52;60;61")
@@ -96,11 +67,11 @@ function(format_gencode_flags flags out)
endif()
# Generate SASS
foreach(ver ${flags})
set(${out} "${${out}}--generate-code=arch=compute_${ver},code=sm_${ver};")
set(${out} "${${out}}-gencode arch=compute_${ver},code=sm_${ver};")
endforeach()
# Generate PTX for last architecture
list(GET flags -1 ver)
set(${out} "${${out}}--generate-code=arch=compute_${ver},code=compute_${ver};")
set(${out} "${${out}}-gencode arch=compute_${ver},code=compute_${ver};")
set(${out} "${${out}}" PARENT_SCOPE)
endfunction(format_gencode_flags flags)
@@ -109,13 +80,9 @@ endfunction(format_gencode_flags flags)
# if necessary, installs the main R package dependencies;
# runs R CMD INSTALL.
function(setup_rpackage_install_target rlib_target build_dir)
# backup cmake_install.cmake
install(CODE "file(COPY \"${build_dir}/R-package/cmake_install.cmake\"
DESTINATION \"${build_dir}/bak\")")
install(CODE "file(REMOVE_RECURSE \"${build_dir}/R-package\")")
install(
DIRECTORY "${xgboost_SOURCE_DIR}/R-package"
DIRECTORY "${PROJECT_SOURCE_DIR}/R-package"
DESTINATION "${build_dir}"
REGEX "src/*" EXCLUDE
REGEX "R-package/configure" EXCLUDE
@@ -131,8 +98,4 @@ DESTINATION \"${build_dir}/bak\")")
install(CODE "execute_process(COMMAND \"${LIBR_EXECUTABLE}\" \"-q\" \"-e\" \"${XGB_DEPS_SCRIPT}\")")
install(CODE "execute_process(COMMAND \"${LIBR_EXECUTABLE}\" CMD INSTALL\
\"--no-multiarch\" \"--build\" \"${build_dir}/R-package\")")
# restore cmake_install.cmake
install(CODE "file(RENAME \"${build_dir}/bak/cmake_install.cmake\"
\"${build_dir}/R-package/cmake_install.cmake\")")
endfunction(setup_rpackage_install_target)


@@ -1,9 +0,0 @@
function (write_version)
message(STATUS "xgboost VERSION: ${xgboost_VERSION}")
configure_file(
${xgboost_SOURCE_DIR}/cmake/version_config.h.in
${xgboost_SOURCE_DIR}/include/xgboost/version_config.h @ONLY)
configure_file(
${xgboost_SOURCE_DIR}/cmake/Python_version.in
${xgboost_SOURCE_DIR}/python-package/xgboost/VERSION @ONLY)
endfunction (write_version)


@@ -1,7 +1,7 @@
set(ASan_LIB_NAME ASan)
find_library(ASan_LIBRARY
NAMES libasan.so libasan.so.5 libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0
NAMES libasan.so libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
include(FindPackageHandleStandardArgs)


@@ -1,23 +0,0 @@
if (NVML_LIBRARY)
unset(NVML_LIBRARY CACHE)
endif(NVML_LIBRARY)
set(NVML_LIB_NAME nvml)
find_path(NVML_INCLUDE_DIR
NAMES nvml.h
PATHS ${CUDA_HOME}/include ${CUDA_INCLUDE} /usr/local/cuda/include)
find_library(NVML_LIBRARY
NAMES nvidia-ml)
message(STATUS "Using nvml library: ${NVML_LIBRARY}")
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(NVML DEFAULT_MSG
NVML_INCLUDE_DIR NVML_LIBRARY)
mark_as_advanced(
NVML_INCLUDE_DIR
NVML_LIBRARY
)


@@ -32,28 +32,20 @@
#
# This module assumes that the user has already called find_package(CUDA)
if (NCCL_LIBRARY)
# Don't cache NCCL_LIBRARY to enable switching between static and shared.
unset(NCCL_LIBRARY CACHE)
endif()
if (BUILD_WITH_SHARED_NCCL)
# libnccl.so
set(NCCL_LIB_NAME nccl)
else ()
# libnccl_static.a
set(NCCL_LIB_NAME nccl_static)
endif (BUILD_WITH_SHARED_NCCL)
find_path(NCCL_INCLUDE_DIR
NAMES nccl.h
PATHS $ENV{NCCL_ROOT}/include ${NCCL_ROOT}/include)
PATHS $ENV{NCCL_ROOT}/include ${NCCL_ROOT}/include ${CUDA_INCLUDE_DIRS} /usr/include)
find_library(NCCL_LIBRARY
NAMES ${NCCL_LIB_NAME}
PATHS $ENV{NCCL_ROOT}/lib/ ${NCCL_ROOT}/lib)
PATHS $ENV{NCCL_ROOT}/lib ${NCCL_ROOT}/lib ${CUDA_INCLUDE_DIRS}/../lib /usr/lib)
message(STATUS "Using nccl library: ${NCCL_LIBRARY}")
if (NCCL_INCLUDE_DIR AND NCCL_LIBRARY)
get_filename_component(NCCL_LIBRARY ${NCCL_LIBRARY} PATH)
endif ()
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Nccl DEFAULT_MSG
@@ -62,4 +54,5 @@ find_package_handle_standard_args(Nccl DEFAULT_MSG
mark_as_advanced(
NCCL_INCLUDE_DIR
NCCL_LIBRARY
NCCL_LIB_NAME
)


@@ -1,13 +0,0 @@
set(UBSan_LIB_NAME UBSan)
find_library(UBSan_LIBRARY
NAMES libubsan.so libubsan.so.5 libubsan.so.4 libubsan.so.3 libubsan.so.2 libubsan.so.1 libubsan.so.0
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(UBSan DEFAULT_MSG
UBSan_LIBRARY)
mark_as_advanced(
UBSan_LIBRARY
UBSan_LIB_NAME)


@@ -1,11 +0,0 @@
/*!
* Copyright 2019 XGBoost contributors
*/
#ifndef XGBOOST_VERSION_CONFIG_H_
#define XGBOOST_VERSION_CONFIG_H_
#define XGBOOST_VER_MAJOR @xgboost_VERSION_MAJOR@
#define XGBOOST_VER_MINOR @xgboost_VERSION_MINOR@
#define XGBOOST_VER_PATCH @xgboost_VERSION_PATCH@
#endif // XGBOOST_VERSION_CONFIG_H_


@@ -1,5 +0,0 @@
@PACKAGE_INIT@
if(NOT TARGET xgboost::xgboost)
include(${CMAKE_CURRENT_LIST_DIR}/XGBoostTargets.cmake)
endif()


@@ -119,7 +119,6 @@ If you have particular usecase of xgboost that you would like to highlight.
Send a PR to add a one sentence description:)
- XGBoost is used in [Kaggle Script](https://www.kaggle.com/scripts) to solve data science challenges.
- Distribute XGBoost as Rest API server from Jupyter notebook with [BentoML](https://github.com/bentoml/bentoml). [Link to notebook](https://github.com/bentoml/BentoML/blob/master/examples/xgboost-predict-titanic-survival/XGBoost-titanic-survival-prediction.ipynb)
- [Seldon predictive service powered by XGBoost](http://docs.seldon.io/iris-demo.html)
- XGBoost Distributed is used in [ODPS Cloud Service by Alibaba](https://yq.aliyun.com/articles/6355) (in Chinese)
- XGBoost is incoporated as part of [Graphlab Create](https://dato.com/products/create/) for scalable machine learning.
@@ -136,7 +135,6 @@ Send a PR to add a one sentence description:)
## Awards
- [John Chambers Award](http://stat-computing.org/awards/jmc/winners.html) - 2016 Winner: XGBoost R Package, by Tong He (Simon Fraser University) and Tianqi Chen (University of Washington)
- [InfoWorlds 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html)
## Windows Binaries
Unofficial windows binaries and instructions on how to use them are hosted on [Guido Tapia's blog](http://www.picnet.com.au/blogs/guido/post/2016/09/22/xgboost-windows-x64-binaries-for-download/)


@@ -62,7 +62,7 @@ test:data = "agaricus.txt.test"
We use the tree booster and logistic regression objective in our setting. This indicates that we accomplish our task using classic gradient boosting regression tree(GBRT), which is a promising method for binary classification.
The parameters shown in the example gives the most common ones that are needed to use xgboost.
If you are interested in more parameter settings, the complete parameter settings and detailed descriptions are [here](../../doc/parameter.rst). Besides putting the parameters in the configuration file, we can set them by passing them as arguments as below:
If you are interested in more parameter settings, the complete parameter settings and detailed descriptions are [here](../../doc/parameter.md). Besides putting the parameters in the configuration file, we can set them by passing them as arguments as below:
```
../../xgboost mushroom.conf max_depth=6
```

View File

@@ -1,4 +0,0 @@
cmake_minimum_required(VERSION 3.12)
find_package(xgboost REQUIRED)
add_executable(api-demo c-api-demo.c)
target_link_libraries(api-demo xgboost::xgboost)

View File

@@ -1,19 +0,0 @@
SRC=c-api-demo.c
TGT=c-api-demo

cc=cc
CFLAGS ?=-O3

XGBOOST_ROOT ?=../..
INCLUDE_DIR=-I$(XGBOOST_ROOT)/include -I$(XGBOOST_ROOT)/dmlc-core/include -I$(XGBOOST_ROOT)/rabit/include
LIB_DIR=-L$(XGBOOST_ROOT)/lib

build: $(TGT)

$(TGT): $(SRC) Makefile
	$(cc) $(CFLAGS) $(INCLUDE_DIR) $(LIB_DIR) -o $(TGT) $(SRC) -lxgboost

run: $(TGT)
	LD_LIBRARY_PATH=$(XGBOOST_ROOT)/lib ./$(TGT)

clean:
	rm -f $(TGT)

View File

@@ -1,30 +0,0 @@
C-APIs
===
**XGBoost** implements a C API originally designed for various language
bindings. For a detailed reference, please check `xgboost/c_api.h`. Here is a
demonstration of using the API.

# CMake

If you use **CMake** for your project, you can either install **XGBoost**
somewhere in your system and tell CMake to find it by calling
`find_package(xgboost)`, or put **XGBoost** inside your project's source tree
and call the **CMake** command `add_subdirectory(xgboost)`. To use
`find_package()`, put the following in your **CMakeLists.txt**:
``` CMake
find_package(xgboost REQUIRED)
add_executable(api-demo c-api-demo.c)
target_link_libraries(api-demo xgboost::xgboost)
```
If you want to put XGBoost inside your project (like git submodule), use this
instead:
``` CMake
add_subdirectory(xgboost)
add_executable(api-demo c-api-demo.c)
target_link_libraries(api-demo xgboost)
```
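Note the different target names: with `find_package()` you link against the imported target `xgboost::xgboost` created by the installed package configuration, while with `add_subdirectory()` you link against the plain `xgboost` target defined by XGBoost's own build.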
# make

You can start by modifying the makefile in this directory to fit your needs.
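For example, `make` (or `make build`) compiles `c-api-demo` against the headers and library in the XGBoost source tree, and `make run` executes it with `LD_LIBRARY_PATH` set so the freshly built `libxgboost` is found without a system-wide install.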

View File

@@ -1,88 +0,0 @@
/*!
 * Copyright 2019 XGBoost contributors
 *
 * \file c-api-demo.c
 * \brief A simple example of using xgboost C API.
 */

#include <stdio.h>
#include <stdlib.h>
#include <xgboost/c_api.h>

#define safe_xgboost(call) {  \
  int err = (call);  \
  if (err != 0) {  \
    fprintf(stderr, "%s:%d: error in %s: %s\n", __FILE__, __LINE__, #call, XGBGetLastError());  \
    exit(1);  \
  }  \
}
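
/* Every function in the C API returns an int status: 0 on success and
 * non-zero on failure.  On failure, safe_xgboost prints the message
 * reported by XGBGetLastError() and aborts. */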
int main(int argc, char** argv) {
  int silent = 0;
  int use_gpu = 0;  // set to 1 to use the GPU for training

  // load the data
  DMatrixHandle dtrain, dtest;
  safe_xgboost(XGDMatrixCreateFromFile("../data/agaricus.txt.train", silent, &dtrain));
  safe_xgboost(XGDMatrixCreateFromFile("../data/agaricus.txt.test", silent, &dtest));

  // create the booster
  BoosterHandle booster;
  DMatrixHandle eval_dmats[2] = {dtrain, dtest};
  safe_xgboost(XGBoosterCreate(eval_dmats, 2, &booster));

  // configure the training
  // available parameters are described here:
  //   https://xgboost.readthedocs.io/en/latest/parameter.html
  safe_xgboost(XGBoosterSetParam(booster, "tree_method", use_gpu ? "gpu_hist" : "hist"));
  if (use_gpu) {
    // set the GPU to use;
    // this is not necessary, but provided here as an illustration
    safe_xgboost(XGBoosterSetParam(booster, "gpu_id", "0"));
  } else {
    // avoid evaluating objective and metric on a GPU
    safe_xgboost(XGBoosterSetParam(booster, "gpu_id", "-1"));
  }

  safe_xgboost(XGBoosterSetParam(booster, "objective", "binary:logistic"));
  safe_xgboost(XGBoosterSetParam(booster, "min_child_weight", "1"));
  safe_xgboost(XGBoosterSetParam(booster, "gamma", "0.1"));
  safe_xgboost(XGBoosterSetParam(booster, "max_depth", "3"));
  safe_xgboost(XGBoosterSetParam(booster, "verbosity", silent ? "0" : "1"));

  // train and evaluate for 10 iterations
  int n_trees = 10;
  const char* eval_names[2] = {"train", "test"};
  const char* eval_result = NULL;
  for (int i = 0; i < n_trees; ++i) {
    safe_xgboost(XGBoosterUpdateOneIter(booster, i, dtrain));
    safe_xgboost(XGBoosterEvalOneIter(booster, i, eval_dmats, eval_names, 2, &eval_result));
    printf("%s\n", eval_result);
  }

  // predict
  bst_ulong out_len = 0;
  const float* out_result = NULL;
  int n_print = 10;

  safe_xgboost(XGBoosterPredict(booster, dtest, 0, 0, 0, &out_len, &out_result));
  printf("y_pred: ");
  for (int i = 0; i < n_print; ++i) {
    printf("%1.4f ", out_result[i]);
  }
  printf("\n");

  // print true labels
  safe_xgboost(XGDMatrixGetFloatInfo(dtest, "label", &out_len, &out_result));
  printf("y_test: ");
  for (int i = 0; i < n_print; ++i) {
    printf("%1.4f ", out_result[i]);
  }
  printf("\n");

  // free everything
  safe_xgboost(XGBoosterFree(booster));
  safe_xgboost(XGDMatrixFree(dtrain));
  safe_xgboost(XGDMatrixFree(dtest));
  return 0;
}

View File

@@ -1,6 +0,0 @@
Dask
====
This directory contains some demonstrations for using `dask` with `XGBoost`.
For an overview, see
https://xgboost.readthedocs.io/en/latest/tutorials/dask.html.

View File

@@ -1,42 +0,0 @@
import xgboost as xgb
from xgboost.dask import DaskDMatrix
from dask.distributed import Client
from dask.distributed import LocalCluster
from dask import array as da


def main(client):
    # generate some random data for demonstration
    m = 100000
    n = 100
    X = da.random.random(size=(m, n), chunks=100)
    y = da.random.random(size=(m, ), chunks=100)

    # DaskDMatrix acts like a normal DMatrix, working as a proxy for the
    # local DMatrix pieces scattered across the workers.
    dtrain = DaskDMatrix(client, X, y)

    # Use the train method from xgboost.dask instead of xgboost.  This
    # distributed version of train returns a dictionary containing the
    # resulting booster and the evaluation history obtained from the
    # evaluation metrics.
    output = xgb.dask.train(client,
                            {'verbosity': 1,
                             'nthread': 1,
                             'tree_method': 'hist'},
                            dtrain,
                            num_boost_round=4, evals=[(dtrain, 'train')])
    bst = output['booster']
    history = output['history']

    # you can pass output directly into `predict` too.
    prediction = xgb.dask.predict(client, bst, dtrain)
    print('Evaluation history:', history)
    return prediction


if __name__ == '__main__':
    # or use other clusters for scaling
    with LocalCluster(n_workers=7, threads_per_worker=1) as cluster:
        with Client(cluster) as client:
            main(client)

View File

@@ -1,46 +0,0 @@
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
from dask import array as da
import xgboost as xgb
from xgboost.dask import DaskDMatrix


def main(client):
    # generate some random data for demonstration
    m = 100000
    n = 100
    X = da.random.random(size=(m, n), chunks=100)
    y = da.random.random(size=(m, ), chunks=100)

    # DaskDMatrix acts like a normal DMatrix, working as a proxy for the
    # local DMatrix pieces scattered across the workers.
    dtrain = DaskDMatrix(client, X, y)

    # Use the train method from xgboost.dask instead of xgboost.  This
    # distributed version of train returns a dictionary containing the
    # resulting booster and the evaluation history obtained from the
    # evaluation metrics.
    output = xgb.dask.train(client,
                            {'verbosity': 2,
                             'nthread': 1,
                             # Golden line for GPU training
                             'tree_method': 'gpu_hist'},
                            dtrain,
                            num_boost_round=4, evals=[(dtrain, 'train')])
    bst = output['booster']
    history = output['history']

    # you can pass output directly into `predict` too.
    prediction = xgb.dask.predict(client, bst, dtrain)
    prediction = prediction.compute()
    print('Evaluation history:', history)
    return prediction


if __name__ == '__main__':
    # `LocalCUDACluster` is used for assigning GPUs to XGBoost processes.
    # Here `n_workers` represents the number of GPUs, since we use one GPU
    # per worker process.
    with LocalCUDACluster(n_workers=2, threads_per_worker=1) as cluster:
        with Client(cluster) as client:
            main(client)

View File

@@ -1,39 +0,0 @@
'''Dask interface demo:
Use scikit-learn regressor interface with CPU histogram tree method.'''

from dask.distributed import Client
from dask.distributed import LocalCluster
from dask import array as da
import xgboost


def main(client):
    # generate some random data for demonstration
    n = 100
    m = 10000
    partition_size = 100
    X = da.random.random((m, n), partition_size)
    y = da.random.random(m, partition_size)

    regressor = xgboost.dask.DaskXGBRegressor(verbosity=1, n_estimators=2)
    regressor.set_params(tree_method='hist')
    # assigning client here is optional
    regressor.client = client

    regressor.fit(X, y, eval_set=[(X, y)])
    prediction = regressor.predict(X)

    bst = regressor.get_booster()
    history = regressor.evals_result()

    print('Evaluation history:', history)
    # returned prediction is always a dask array.
    assert isinstance(prediction, da.Array)
    return bst  # returning the trained model


if __name__ == '__main__':
    # or use other clusters for scaling
    with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
        with Client(cluster) as client:
            main(client)

View File

@@ -1,42 +0,0 @@
'''Dask interface demo:
Use scikit-learn regressor interface with GPU histogram tree method.'''

from dask.distributed import Client
# It's recommended to use dask_cuda for GPU assignment
from dask_cuda import LocalCUDACluster
from dask import array as da
import xgboost


def main(client):
    # generate some random data for demonstration
    n = 100
    m = 1000000
    partition_size = 10000
    X = da.random.random((m, n), partition_size)
    y = da.random.random(m, partition_size)

    regressor = xgboost.dask.DaskXGBRegressor(verbosity=1)
    regressor.set_params(tree_method='gpu_hist')
    # assigning client here is optional
    regressor.client = client

    regressor.fit(X, y, eval_set=[(X, y)])
    prediction = regressor.predict(X)

    bst = regressor.get_booster()
    history = regressor.evals_result()

    print('Evaluation history:', history)
    # returned prediction is always a dask array.
    assert isinstance(prediction, da.Array)
    return bst  # returning the trained model


if __name__ == '__main__':
    # With dask_cuda, one can scale up XGBoost to arbitrary GPU clusters.
    # The `LocalCUDACluster` used here is only for demonstration purposes.
    with LocalCUDACluster() as cluster:
        with Client(cluster) as client:
            main(client)

View File

@@ -1,5 +1,7 @@
# GPU Acceleration Demo
`cover_type.py` shows how to train a model on the [forest cover type](https://archive.ics.uci.edu/ml/datasets/covertype) dataset using GPU acceleration. The forest cover type dataset has 581,012 rows and 54 features, making it time consuming to process. We compare the run-time and accuracy of the GPU and CPU histogram algorithms.
This demo shows how to train a model on the [forest cover type](https://archive.ics.uci.edu/ml/datasets/covertype) dataset using GPU acceleration. The forest cover type dataset has 581,012 rows and 54 features, making it time consuming to process. We compare the run-time and accuracy of the GPU and CPU histogram algorithms.
`memory.py` shows how to repeatedly train xgboost models while freeing memory between iterations.
This demo requires the [GPU plug-in](https://xgboost.readthedocs.io/en/latest/gpu/index.html) to be built and installed.
The dataset is loaded automatically via sklearn.
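
As a rough illustration of what the comparison looks like, here is a minimal sketch (not `cover_type.py` itself; the parameter values are illustrative, and it assumes an XGBoost build with GPU support):

```python
import time

import xgboost as xgb
from sklearn.datasets import fetch_covtype
from sklearn.model_selection import train_test_split

# Fetch the 581,012 x 54 forest cover type dataset (downloaded on first use).
X, y = fetch_covtype(return_X_y=True)
y = y - 1  # labels are 1..7, but xgboost expects classes starting at 0

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=7)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)

# Identical parameters for both runs; only the tree method differs.
base_params = {'objective': 'multi:softmax', 'num_class': 7, 'max_depth': 8}

for tree_method in ('gpu_hist', 'hist'):
    params = dict(base_params, tree_method=tree_method)
    start = time.time()
    xgb.train(params, dtrain, num_boost_round=50,
              evals=[(dtest, 'test')], verbose_eval=10)
    print('%s: %.1f seconds' % (tree_method, time.time() - start))
```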

Some files were not shown because too many files have changed in this diff.