Compare commits

16 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 74e2f652de |  |
|  | e02fff53f2 |  |
|  | fcb2efbadd |  |
|  | f4621f09c7 |  |
|  | bf1b2cbfa2 |  |
|  | d90e7b3117 |  |
|  | 088c43d666 |  |
|  | 69fc8a632f |  |
|  | 213f4fa45a |  |
|  | 5ca21f252a |  |
|  | eeb67c3d52 |  |
|  | ed37fdb9c9 |  |
|  | e7e522fb06 |  |
|  | 8e39a675be |  |
|  | 7f542d2198 |  |
|  | c8d32102fb |  |
.clang-tidy (38 changed lines)
```diff
@@ -1,21 +1,21 @@
 Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
 CheckOptions:
-  - { key: readability-identifier-naming.ClassCase, value: CamelCase }
-  - { key: readability-identifier-naming.StructCase, value: CamelCase }
-  - { key: readability-identifier-naming.TypeAliasCase, value: CamelCase }
-  - { key: readability-identifier-naming.TypedefCase, value: CamelCase }
-  - { key: readability-identifier-naming.TypeTemplateParameterCase, value: CamelCase }
-  - { key: readability-identifier-naming.MemberCase, value: lower_case }
-  - { key: readability-identifier-naming.PrivateMemberSuffix, value: '_' }
-  - { key: readability-identifier-naming.ProtectedMemberSuffix, value: '_' }
-  - { key: readability-identifier-naming.EnumCase, value: CamelCase }
-  - { key: readability-identifier-naming.EnumConstant, value: CamelCase }
-  - { key: readability-identifier-naming.EnumConstantPrefix, value: k }
-  - { key: readability-identifier-naming.GlobalConstantCase, value: CamelCase }
-  - { key: readability-identifier-naming.GlobalConstantPrefix, value: k }
-  - { key: readability-identifier-naming.StaticConstantCase, value: CamelCase }
-  - { key: readability-identifier-naming.StaticConstantPrefix, value: k }
-  - { key: readability-identifier-naming.ConstexprVariableCase, value: CamelCase }
-  - { key: readability-identifier-naming.ConstexprVariablePrefix, value: k }
-  - { key: readability-identifier-naming.FunctionCase, value: CamelCase }
-  - { key: readability-identifier-naming.NamespaceCase, value: lower_case }
+  - { key: readability-identifier-naming.ClassCase, value: CamelCase }
+  - { key: readability-identifier-naming.StructCase, value: CamelCase }
+  - { key: readability-identifier-naming.TypeAliasCase, value: CamelCase }
+  - { key: readability-identifier-naming.TypedefCase, value: CamelCase }
+  - { key: readability-identifier-naming.TypeTemplateParameterCase, value: CamelCase }
+  - { key: readability-identifier-naming.MemberCase, value: lower_case }
+  - { key: readability-identifier-naming.PrivateMemberSuffix, value: '_' }
+  - { key: readability-identifier-naming.ProtectedMemberSuffix, value: '_' }
+  - { key: readability-identifier-naming.EnumCase, value: CamelCase }
+  - { key: readability-identifier-naming.EnumConstant, value: CamelCase }
+  - { key: readability-identifier-naming.EnumConstantPrefix, value: k }
+  - { key: readability-identifier-naming.GlobalConstantCase, value: CamelCase }
+  - { key: readability-identifier-naming.GlobalConstantPrefix, value: k }
+  - { key: readability-identifier-naming.StaticConstantCase, value: CamelCase }
+  - { key: readability-identifier-naming.StaticConstantPrefix, value: k }
+  - { key: readability-identifier-naming.ConstexprVariableCase, value: CamelCase }
+  - { key: readability-identifier-naming.ConstexprVariablePrefix, value: k }
+  - { key: readability-identifier-naming.FunctionCase, value: CamelCase }
+  - { key: readability-identifier-naming.NamespaceCase, value: lower_case }
```
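As context for the `readability-identifier-naming` options above, here is a minimal sketch (with made-up names, not XGBoost sources) of C++ code that satisfies these rules: CamelCase types and functions, lower_case members and namespaces, a trailing underscore on private members, and a `k` prefix on constants and enumerators.

```cpp
// Hypothetical example; names are illustrative only.
namespace tree_util {                    // NamespaceCase: lower_case

constexpr int kMaxDepth = 8;             // ConstexprVariablePrefix: k, CamelCase

enum class SplitKind {                   // EnumCase: CamelCase
  kExact,                                // EnumConstantPrefix: k, CamelCase
  kApprox,
};

class TreeNode {                         // ClassCase: CamelCase
 public:
  int Depth() const { return depth_; }   // FunctionCase: CamelCase
 private:
  int depth_ = 0;                        // PrivateMemberSuffix: '_', lower_case
};

}  // namespace tree_util
```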
.github/FUNDING.yml (1 changed line, vendored)
```diff
@@ -1 +0,0 @@
-open_collective: xgboost
```
.github/workflows/main.yml (138 changed lines, vendored)
```diff
@@ -1,138 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: XGBoost-CI
-
-# Controls when the action will run. Triggers the workflow on push or pull request
-# events but only for the master branch
-on: [push, pull_request]
-
-env:
-  R_PACKAGES: c('XML', 'igraph', 'data.table', 'magrittr', 'stringi', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools')
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-jobs:
-  test-with-jvm:
-    name: Test JVM on OS ${{ matrix.os }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      fail-fast: false
-      matrix:
-        os: [windows-latest, windows-2016, ubuntu-latest]
-
-    steps:
-    - uses: actions/checkout@v2
-      with:
-        submodules: 'true'
-
-    - uses: actions/setup-java@v1
-      with:
-        java-version: 1.8
-
-    - name: Cache Maven packages
-      uses: actions/cache@v2
-      with:
-        path: ~/.m2
-        key: ${{ runner.os }}-m2-${{ hashFiles('./jvm-packages/pom.xml') }}
-        restore-keys: ${{ runner.os }}-m2
-
-    - name: Test JVM packages
-      run: |
-        cd jvm-packages
-        mvn test -pl :xgboost4j_2.12
-
-
-  lintr:
-    runs-on: ${{ matrix.config.os }}
-
-    name: Run R linters on OS ${{ matrix.config.os }}, R ${{ matrix.config.r }}, Compiler ${{ matrix.config.compiler }}, Build ${{ matrix.config.build }}
-
-    strategy:
-      matrix:
-        config:
-        - {os: windows-latest, r: 'release', compiler: 'mingw', build: 'autotools'}
-    env:
-      R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
-      RSPM: ${{ matrix.config.rspm }}
-
-    steps:
-    - uses: actions/checkout@v2
-      with:
-        submodules: 'true'
-
-    - uses: r-lib/actions/setup-r@master
-      with:
-        r-version: ${{ matrix.config.r }}
-
-    - name: Cache R packages
-      uses: actions/cache@v2
-      with:
-        path: ${{ env.R_LIBS_USER }}
-        key: ${{ runner.os }}-r-${{ matrix.config.r }}-1-${{ hashFiles('R-package/DESCRIPTION') }}
-        restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-2-
-
-    - name: Install dependencies
-      shell: Rscript {0}
-      run: |
-        install.packages(${{ env.R_PACKAGES }},
-                         repos = 'http://cloud.r-project.org',
-                         dependencies = c('Depends', 'Imports', 'LinkingTo'))
-
-    - name: Run lintr
-      run: |
-        cd R-package
-        R.exe CMD INSTALL .
-        Rscript.exe tests/run_lint.R
-
-
-  test-with-R:
-    runs-on: ${{ matrix.config.os }}
-
-    name: Test R on OS ${{ matrix.config.os }}, R ${{ matrix.config.r }}, Compiler ${{ matrix.config.compiler }}, Build ${{ matrix.config.build }}
-
-    strategy:
-      fail-fast: false
-      matrix:
-        config:
-        - {os: windows-latest, r: 'release', compiler: 'msvc', build: 'autotools'}
-        - {os: windows-2016, r: 'release', compiler: 'msvc', build: 'autotools'}
-        - {os: windows-latest, r: 'release', compiler: 'msvc', build: 'cmake'}
-        - {os: windows-2016, r: 'release', compiler: 'msvc', build: 'cmake'}
-        - {os: windows-latest, r: 'release', compiler: 'mingw', build: 'autotools'}
-        - {os: windows-2016, r: 'release', compiler: 'mingw', build: 'autotools'}
-        - {os: windows-latest, r: 'release', compiler: 'mingw', build: 'cmake'}
-        - {os: windows-2016, r: 'release', compiler: 'mingw', build: 'cmake'}
-    env:
-      R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
-      RSPM: ${{ matrix.config.rspm }}
-
-    steps:
-    - uses: actions/checkout@v2
-      with:
-        submodules: 'true'
-
-    - uses: r-lib/actions/setup-r@master
-      with:
-        r-version: ${{ matrix.config.r }}
-
-    - name: Cache R packages
-      uses: actions/cache@v2
-      with:
-        path: ${{ env.R_LIBS_USER }}
-        key: ${{ runner.os }}-r-${{ matrix.config.r }}-1-${{ hashFiles('R-package/DESCRIPTION') }}
-        restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-2-
-
-    - name: Install dependencies
-      shell: Rscript {0}
-      run: |
-        install.packages(${{ env.R_PACKAGES }},
-                         repos = 'http://cloud.r-project.org',
-                         dependencies = c('Depends', 'Imports', 'LinkingTo'))
-
-    - uses: actions/setup-python@v2
-      with:
-        python-version: '3.6' # Version range or exact version of a Python version to use, using SemVer's version range syntax
-        architecture: 'x64' # optional x64 or x86. Defaults to x64 if not specified
-
-    - name: Test R
-      run: |
-        python tests/ci_build/test_r_package.py --compiler="${{ matrix.config.compiler }}" --build-tool="${{ matrix.config.build }}"
```
.gitignore (6 changed lines, vendored)
```diff
@@ -51,7 +51,6 @@ Debug
 #.Rbuildignore
 R-package.Rproj
 *.cache*
-.mypy_cache/
 # java
 java/xgboost4j/target
 java/xgboost4j/tmp
@@ -66,6 +65,7 @@ nb-configuration*
 .pydevproject
 .settings/
 build
 config.mk
 /xgboost
 *.data
 build_plugin
@@ -93,7 +93,6 @@ metastore_db
 # files from R-package source install
 **/config.status
 R-package/src/Makevars
 *.lib
 
 # Visual Studio Code
 /.vscode/
@@ -102,6 +101,3 @@ R-package/src/Makevars
 .idea
 *.iml
 /cmake-build-debug/
-
-# GDB
-.gdb_history
```
.travis.yml (11 changed lines)
```diff
@@ -6,7 +6,7 @@ os:
   - linux
   - osx
 
-osx_image: xcode10.1
+osx_image: xcode10.3
 dist: bionic
 
 # Use Build Matrix to do lint and build seperately
@@ -21,10 +21,6 @@ env:
     # cmake test
     - TASK=cmake_test
 
-  global:
-    - secure: "PR16i9F8QtNwn99C5NDp8nptAS+97xwDtXEJJfEiEVhxPaaRkOp0MPWhogCaK0Eclxk1TqkgWbdXFknwGycX620AzZWa/A1K3gAs+GrpzqhnPMuoBJ0Z9qxXTbSJvCyvMbYwVrjaxc/zWqdMU8waWz8A7iqKGKs/SqbQ3rO6v7c="
-    - secure: "dAGAjBokqm/0nVoLMofQni/fWIBcYSmdq4XvCBX1ZAMDsWnuOfz/4XCY6h2lEI1rVHZQ+UdZkc9PioOHGPZh5BnvE49/xVVWr9c4/61lrDOlkD01ZjSAeoV0fAZq+93V/wPl4QV+MM+Sem9hNNzFSbN5VsQLAiWCSapWsLdKzqA="
-
 matrix:
   exclude:
     - os: linux
@@ -43,13 +39,12 @@ addons:
       - graphviz
       - openssl
       - libgit2
-      - lz4
-      - wget
+      - r
     update: true
 
 before_install:
   - source tests/travis/travis_setup_env.sh
   - source dmlc-core/scripts/travis/travis_setup_env.sh
   - if [ "${TASK}" != "python_sdist_test" ]; then export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package; fi
   - echo "MAVEN_OPTS='-Xmx2g -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m -Dorg.slf4j.simpleLogger.defaultLogLevel=error'" > ~/.mavenrc
@@ -65,7 +60,7 @@ cache:
   - ${HOME}/.cache/pip
 
 before_cache:
-  - tests/travis/travis_before_cache.sh
+  - dmlc-core/scripts/travis/travis_before_cache.sh
 
 after_failure:
   - tests/travis/travis_after_failure.sh
```
CMakeLists.txt (117 changed lines)
```diff
@@ -1,10 +1,8 @@
-cmake_minimum_required(VERSION 3.13)
-project(xgboost LANGUAGES CXX C VERSION 1.2.0)
+cmake_minimum_required(VERSION 3.12)
+project(xgboost LANGUAGES CXX C VERSION 1.0.1)
 include(cmake/Utils.cmake)
 list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
-cmake_policy(SET CMP0022 NEW)
-cmake_policy(SET CMP0079 NEW)
 cmake_policy(SET CMP0063 NEW)
 
 if ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
   cmake_policy(SET CMP0077 NEW)
@@ -25,22 +23,17 @@ set_default_configuration_release()
 #-- Options
 option(BUILD_C_DOC "Build documentation for C APIs using Doxygen." OFF)
 option(USE_OPENMP "Build with OpenMP support." ON)
 option(BUILD_STATIC_LIB "Build static library" OFF)
 ## Bindings
 option(JVM_BINDINGS "Build JVM bindings" OFF)
 option(R_LIB "Build shared library for R package" OFF)
 ## Dev
 option(USE_DEBUG_OUTPUT "Dump internal training results like gradients and predictions to stdout.
 Should only be used for debugging." OFF)
 option(FORCE_COLORED_OUTPUT "Force colored output from compilers, useful when ninja is used instead of make." OFF)
 option(ENABLE_ALL_WARNINGS "Enable all compiler warnings. Only effective for GCC/Clang" OFF)
 option(LOG_CAPI_INVOCATION "Log all C API invocations for debugging" OFF)
 option(GOOGLE_TEST "Build google tests" OFF)
 option(USE_DMLC_GTEST "Use google tests bundled with dmlc-core submodule" OFF)
 option(USE_NVTX "Build with cuda profiling annotations. Developers only." OFF)
 set(NVTX_HEADER_DIR "" CACHE PATH "Path to the stand-alone nvtx header")
 option(RABIT_MOCK "Build rabit with mock" OFF)
 option(HIDE_CXX_SYMBOLS "Build shared library and hide all C++ symbols" OFF)
 ## CUDA
 option(USE_CUDA "Build with GPU acceleration" OFF)
 option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
@@ -60,7 +53,6 @@ address, leak, undefined and thread.")
 ## Plugins
 option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
 option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)
-option(ADD_PKGCONFIG "Add xgboost.pc into system." ON)
 
 #-- Checks for building XGBoost
 if (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
@@ -82,11 +74,6 @@ endif (R_LIB AND GOOGLE_TEST)
 if (USE_AVX)
   message(SEND_ERROR "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from XGBoost.")
 endif (USE_AVX)
-if (ENABLE_ALL_WARNINGS)
-  if ((NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU"))
-    message(SEND_ERROR "ENABLE_ALL_WARNINGS is only available for Clang and GCC.")
-  endif ((NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU"))
-endif (ENABLE_ALL_WARNINGS)
 
 #-- Sanitizer
 if (USE_SANITIZER)
@@ -101,22 +88,11 @@ if (USE_CUDA)
   message(STATUS "Configured CUDA host compiler: ${CMAKE_CUDA_HOST_COMPILER}")
 
   enable_language(CUDA)
-  if (${CMAKE_CUDA_COMPILER_VERSION} VERSION_LESS 10.0)
-    message(FATAL_ERROR "CUDA version must be at least 10.0!")
-  endif()
   set(GEN_CODE "")
   format_gencode_flags("${GPU_COMPUTE_VER}" GEN_CODE)
   message(STATUS "CUDA GEN_CODE: ${GEN_CODE}")
 endif (USE_CUDA)
 
-if (FORCE_COLORED_OUTPUT AND (CMAKE_GENERATOR STREQUAL "Ninja") AND
-    ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") OR
-     (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")))
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always")
-endif()
-
 find_package(Threads REQUIRED)
 
 if (USE_OPENMP)
   if (APPLE)
     # Require CMake 3.16+ on Mac OSX, as previous versions of CMake had trouble locating
@@ -126,28 +102,14 @@ if (USE_OPENMP)
   find_package(OpenMP REQUIRED)
 endif (USE_OPENMP)
 
-# core xgboost
-add_subdirectory(${xgboost_SOURCE_DIR}/src)
-
 # dmlc-core
 msvc_use_static_runtime()
 add_subdirectory(${xgboost_SOURCE_DIR}/dmlc-core)
 set_target_properties(dmlc PROPERTIES
-  CXX_STANDARD 14
+  CXX_STANDARD 11
   CXX_STANDARD_REQUIRED ON
   POSITION_INDEPENDENT_CODE ON)
 if (MSVC)
   target_compile_options(dmlc PRIVATE
     -D_CRT_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_DEPRECATE)
   if (TARGET dmlc_unit_tests)
     target_compile_options(dmlc_unit_tests PRIVATE
       -D_CRT_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_DEPRECATE)
   endif (TARGET dmlc_unit_tests)
 endif (MSVC)
-if (ENABLE_ALL_WARNINGS)
-  target_compile_options(dmlc PRIVATE -Wall -Wextra)
-endif (ENABLE_ALL_WARNINGS)
-target_link_libraries(objxgboost PUBLIC dmlc)
+list(APPEND LINKED_LIBRARIES_PRIVATE dmlc)
 
 # rabit
 set(RABIT_BUILD_DMLC OFF)
@@ -156,62 +118,28 @@ set(RABIT_WITH_R_LIB ${R_LIB})
 add_subdirectory(rabit)
 
 if (RABIT_MOCK)
-  target_link_libraries(objxgboost PUBLIC rabit_mock_static)
+  if (MSVC)
+    target_compile_options(rabit_mock_static PRIVATE
+      -D_CRT_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_DEPRECATE)
+  endif (MSVC)
+  list(APPEND LINKED_LIBRARIES_PRIVATE rabit_mock_static)
 else()
-  target_link_libraries(objxgboost PUBLIC rabit)
+  if (MSVC)
+    target_compile_options(rabit PRIVATE
+      -D_CRT_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_DEPRECATE)
+  endif (MSVC)
+  list(APPEND LINKED_LIBRARIES_PRIVATE rabit)
 endif(RABIT_MOCK)
-foreach(lib rabit rabit_base rabit_empty rabit_mock rabit_mock_static)
-  # Explicitly link dmlc to rabit, so that configured header (build_config.h)
-  # from dmlc is correctly applied to rabit.
-  if (TARGET ${lib})
-    target_link_libraries(${lib} dmlc ${CMAKE_THREAD_LIBS_INIT})
-    if (HIDE_CXX_SYMBOLS) # Hide all C++ symbols from Rabit
-      set_target_properties(${lib} PROPERTIES CXX_VISIBILITY_PRESET hidden)
-    endif (HIDE_CXX_SYMBOLS)
-    if (ENABLE_ALL_WARNINGS)
-      target_compile_options(${lib} PRIVATE -Wall -Wextra)
-    endif (ENABLE_ALL_WARNINGS)
-  endif (TARGET ${lib})
-endforeach()
 
 # Exports some R specific definitions and objects
 if (R_LIB)
   add_subdirectory(${xgboost_SOURCE_DIR}/R-package)
 endif (R_LIB)
 
-# Plugin
+# core xgboost
 add_subdirectory(${xgboost_SOURCE_DIR}/plugin)
+add_subdirectory(${xgboost_SOURCE_DIR}/src)
+set(XGBOOST_OBJ_SOURCES "${XGBOOST_OBJ_SOURCES};$<TARGET_OBJECTS:objxgboost>")
 
-#-- library
-if (BUILD_STATIC_LIB)
-  add_library(xgboost STATIC)
-else (BUILD_STATIC_LIB)
-  add_library(xgboost SHARED)
-endif (BUILD_STATIC_LIB)
-target_link_libraries(xgboost PRIVATE objxgboost)
-
-if (USE_NVTX)
-  enable_nvtx(xgboost)
-endif (USE_NVTX)
-
-#-- Hide all C++ symbols
-if (HIDE_CXX_SYMBOLS)
-  set_target_properties(objxgboost PROPERTIES CXX_VISIBILITY_PRESET hidden)
-  set_target_properties(xgboost PROPERTIES CXX_VISIBILITY_PRESET hidden)
-endif (HIDE_CXX_SYMBOLS)
-
+#-- Shared library
+add_library(xgboost SHARED ${XGBOOST_OBJ_SOURCES})
 target_include_directories(xgboost
   INTERFACE
   $<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include>
   $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/include>)
+target_link_libraries(xgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})
 
 # This creates its own shared library `xgboost4j'.
 if (JVM_BINDINGS)
@@ -220,21 +148,18 @@ endif (JVM_BINDINGS)
 #-- End shared library
 
 #-- CLI for xgboost
-add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc)
-target_link_libraries(runxgboost PRIVATE objxgboost)
-if (USE_NVTX)
-  enable_nvtx(runxgboost)
-endif (USE_NVTX)
+add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc ${XGBOOST_OBJ_SOURCES})
 
 target_include_directories(runxgboost
   PRIVATE
   ${xgboost_SOURCE_DIR}/include
   ${xgboost_SOURCE_DIR}/dmlc-core/include
   ${xgboost_SOURCE_DIR}/rabit/include)
 target_link_libraries(runxgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})
 set_target_properties(
   runxgboost PROPERTIES
   OUTPUT_NAME xgboost
-  CXX_STANDARD 14
+  CXX_STANDARD 11
   CXX_STANDARD_REQUIRED ON)
 #-- End CLI for xgboost
@@ -245,12 +170,11 @@ add_dependencies(xgboost runxgboost)
 
 #-- Installing XGBoost
 if (R_LIB)
   include(cmake/RPackageInstallTargetSetup.cmake)
   set_target_properties(xgboost PROPERTIES PREFIX "")
   if (APPLE)
     set_target_properties(xgboost PROPERTIES SUFFIX ".so")
   endif (APPLE)
-  setup_rpackage_install_target(xgboost "${CMAKE_CURRENT_BINARY_DIR}/R-package-install")
+  setup_rpackage_install_target(xgboost ${CMAKE_CURRENT_BINARY_DIR})
   set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/dummy_inst")
 endif (R_LIB)
 if (MINGW)
@@ -321,12 +245,3 @@ endif (GOOGLE_TEST)
 # replace /MD with /MT. See https://github.com/dmlc/xgboost/issues/4462
 # for issues caused by mixing of /MD and /MT flags
 msvc_use_static_runtime()
-
-# Add xgboost.pc
-if (ADD_PKGCONFIG)
-  configure_file(${xgboost_SOURCE_DIR}/cmake/xgboost.pc.in ${xgboost_BINARY_DIR}/xgboost.pc @ONLY)
-
-  install(
-    FILES ${xgboost_BINARY_DIR}/xgboost.pc
-    DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
-endif (ADD_PKGCONFIG)
```
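The `CXX_STANDARD 14` / `CXX_STANDARD 11` changes above are easy to see in code. As a minimal illustrative sketch (not from the XGBoost sources), here are constructs that compile under a C++14 target but are rejected when `CXX_STANDARD` is lowered to 11:

```cpp
#include <memory>

// Generic lambdas and std::make_unique are C++14 features;
// a target built with CXX_STANDARD 11 rejects both.
int main() {
  auto add = [](auto a, auto b) { return a + b; };  // generic lambda: C++14
  auto p = std::make_unique<int>(add(20, 22));      // std::make_unique: C++14
  return *p == 42 ? 0 : 1;
}
```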
CONTRIBUTORS.md

```diff
@@ -10,14 +10,14 @@ The Project Management Committee(PMC) consists group of active committers that m
   - Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
 * [Michael Benesty](https://github.com/pommedeterresautee)
   - Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
-* [Yuan Tang](https://github.com/terrytangyuan), Ant Group
-  - Yuan is a software engineer in Ant Group. He contributed mostly in R and Python packages.
+* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
+  - Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
 * [Nan Zhu](https://github.com/CodingCat), Uber
   - Nan is a software engineer in Uber. He contributed mostly in JVM packages.
 * [Jiaming Yuan](https://github.com/trivialfis)
   - Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
-* [Hyunsu Cho](http://hyunsu-cho.io/), NVIDIA
-  - Hyunsu is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
+* [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
+  - Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
 * [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
   - Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
 * [Hongliang Liu](https://github.com/phunterlau)
@@ -37,8 +37,6 @@ Committers are people who have made substantial contribution to the project and
   - Sergei is a software engineer in Criteo. He contributed mostly in JVM packages.
 * [Scott Lundberg](http://scottlundberg.com/), University of Washington
   - Scott is a Ph.D. student at University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package.
-* [Egor Smirnov](https://github.com/SmirnovEgorRu), Intel
-  - Egor has led a major effort to improve the performance of XGBoost on multi-core CPUs.
 
 
 Become a Committer
```
Jenkinsfile (218 changed lines, vendored)
```diff
@@ -6,9 +6,6 @@
 // Command to run command inside a docker container
 dockerRun = 'tests/ci_build/ci_build.sh'
 
-// Which CUDA version to use when building reference distribution wheel
-ref_cuda_ver = '10.0'
-
 import groovy.transform.Field
 
 @Field
@@ -34,14 +31,13 @@ pipeline {
 
   // Build stages
   stages {
-    stage('Jenkins Linux: Initialize') {
-      agent { label 'job_initializer' }
+    stage('Jenkins Linux: Get sources') {
+      agent { label 'linux && cpu' }
       steps {
         script {
           checkoutSrcs()
           commit_id = "${GIT_COMMIT}"
         }
-        sh 'python3 tests/jenkins_get_approval.py'
         stash name: 'srcs'
         milestone ordinal: 1
       }
@@ -67,16 +63,10 @@
           parallel ([
            'build-cpu': { BuildCPU() },
            'build-cpu-rabit-mock': { BuildCPUMock() },
            'build-cpu-non-omp': { BuildCPUNonOmp() },
-            // Build reference, distribution-ready Python wheel with CUDA 10.0
-            // using CentOS 6 image
            'build-gpu-cuda9.0': { BuildCUDA(cuda_version: '9.0') },
            'build-gpu-cuda10.0': { BuildCUDA(cuda_version: '10.0') },
-            // The build-gpu-* builds below use Ubuntu image
            'build-gpu-cuda10.1': { BuildCUDA(cuda_version: '10.1') },
-            'build-gpu-cuda10.2': { BuildCUDA(cuda_version: '10.2') },
-            'build-gpu-cuda11.0': { BuildCUDA(cuda_version: '11.0') },
-            'build-jvm-packages-gpu-cuda10.0': { BuildJVMPackagesWithCUDA(spark_version: '3.0.0', cuda_version: '10.0') },
-            'build-jvm-packages': { BuildJVMPackages(spark_version: '3.0.0') },
+            'build-jvm-packages': { BuildJVMPackages(spark_version: '2.4.3') },
            'build-jvm-doc': { BuildJVMDoc() }
          ])
        }
@@ -89,33 +79,22 @@
        script {
          parallel ([
            'test-python-cpu': { TestPythonCPU() },
-            'test-python-gpu-cuda10.2': { TestPythonGPU(host_cuda_version: '10.2') },
-            'test-python-gpu-cuda11.0-cross': { TestPythonGPU(artifact_cuda_version: '10.0', host_cuda_version: '11.0') },
-            'test-python-gpu-cuda11.0': { TestPythonGPU(artifact_cuda_version: '11.0', host_cuda_version: '11.0') },
-            'test-python-mgpu-cuda10.2': { TestPythonGPU(artifact_cuda_version: '10.2', host_cuda_version: '10.2', multi_gpu: true) },
-            'test-cpp-gpu-cuda10.2': { TestCppGPU(artifact_cuda_version: '10.2', host_cuda_version: '10.2') },
-            'test-cpp-gpu-cuda11.0': { TestCppGPU(artifact_cuda_version: '11.0', host_cuda_version: '11.0') },
-            'test-jvm-jdk8-cuda10.0': { CrossTestJVMwithJDKGPU(artifact_cuda_version: '10.0', host_cuda_version: '10.0') },
-            'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '3.0.0') },
+            'test-python-gpu-cuda9.0': { TestPythonGPU(cuda_version: '9.0') },
+            'test-python-gpu-cuda10.0': { TestPythonGPU(cuda_version: '10.0') },
+            'test-python-gpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1') },
+            'test-python-mgpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1', multi_gpu: true) },
+            'test-cpp-gpu': { TestCppGPU(cuda_version: '10.1') },
+            'test-cpp-mgpu': { TestCppGPU(cuda_version: '10.1', multi_gpu: true) },
+            'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '2.4.3') },
            'test-jvm-jdk11': { CrossTestJVMwithJDK(jdk_version: '11') },
            'test-jvm-jdk12': { CrossTestJVMwithJDK(jdk_version: '12') },
            'test-r-3.4.4': { TestR(use_r35: false) },
            'test-r-3.5.3': { TestR(use_r35: true) }
          ])
        }
        milestone ordinal: 4
      }
    }
    stage('Jenkins Linux: Deploy') {
      agent none
      steps {
        script {
          parallel ([
            'deploy-jvm-packages': { DeployJVMPackages(spark_version: '3.0.0') }
          ])
        }
        milestone ordinal: 5
      }
    }
  }
 }
@@ -134,17 +113,13 @@ def checkoutSrcs() {
   }
 }
 
-def GetCUDABuildContainerType(cuda_version) {
-  return (cuda_version == ref_cuda_ver) ? 'gpu_build_centos6' : 'gpu_build'
-}
-
 def ClangTidy() {
-  node('linux && cpu_build') {
+  node('linux && cpu') {
     unstash name: 'srcs'
     echo "Running clang-tidy job..."
     def container_type = "clang_tidy"
     def docker_binary = "docker"
-    def dockerArgs = "--build-arg CUDA_VERSION=10.1"
+    def dockerArgs = "--build-arg CUDA_VERSION=9.2"
     sh """
     ${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} python3 tests/ci_build/tidy.py
     """
@@ -159,7 +134,7 @@ def Lint() {
   def container_type = "cpu"
   def docker_binary = "docker"
   sh """
-  ${dockerRun} ${container_type} ${docker_binary} bash -c "source activate cpu_test && make lint"
+  ${dockerRun} ${container_type} ${docker_binary} make lint
   """
   deleteDir()
 }
@@ -173,7 +148,7 @@ def SphinxDoc() {
   def docker_binary = "docker"
   def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e SPHINX_GIT_BRANCH=${BRANCH_NAME}'"
   sh """#!/bin/bash
-  ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} bash -c "source activate cpu_test && make -C doc html"
+  ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} make -C doc html
   """
   deleteDir()
 }
@@ -188,10 +163,8 @@ def Doxygen() {
   sh """
   ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/doxygen.sh ${BRANCH_NAME}
   """
-  if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
-    echo 'Uploading doc...'
-    s3Upload file: "build/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "doxygen/${BRANCH_NAME}.tar.bz2"
-  }
+  echo 'Uploading doc...'
+  s3Upload file: "build/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "doxygen/${BRANCH_NAME}.tar.bz2"
   deleteDir()
 }
@@ -203,22 +176,17 @@ def BuildCPU() {
   def container_type = "cpu"
   def docker_binary = "docker"
   sh """
-  ${dockerRun} ${container_type} ${docker_binary} rm -fv dmlc-core/include/dmlc/build_config_default.h
-  # This step is not necessary, but here we include it, to ensure that DMLC_CORE_USE_CMAKE flag is correctly propagated
-  # We want to make sure that we use the configured header build/dmlc/build_config.h instead of include/dmlc/build_config_default.h.
-  # See discussion at https://github.com/dmlc/xgboost/issues/5510
-  ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DPLUGIN_LZ4=ON -DPLUGIN_DENSE_PARSER=ON
+  ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh
   ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
   """
   // Sanitizer test
   def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer -e ASAN_OPTIONS=symbolize=1 -e UBSAN_OPTIONS=print_stacktrace=1:log_path=ubsan_error.log --cap-add SYS_PTRACE'"
+  def docker_args = "--build-arg CMAKE_VERSION=3.12"
   sh """
-  ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \
+  ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \
   -DCMAKE_BUILD_TYPE=Debug -DSANITIZER_PATH=/usr/lib/x86_64-linux-gnu/
   ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
   """
 
   stash name: 'xgboost_cli', includes: 'xgboost'
   deleteDir()
 }
@@ -232,76 +200,34 @@ def BuildCPUMock() {
   sh """
   ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_mock_cmake.sh
   """
-  echo 'Stashing rabit C++ test executable (xgboost)...'
+  echo 'Stashing rabit C++ test executable (xgboost)...'
   stash name: 'xgboost_rabit_tests', includes: 'xgboost'
   deleteDir()
 }
 
 def BuildCPUNonOmp() {
   node('linux && cpu') {
     unstash name: 'srcs'
     echo "Build CPU without OpenMP"
     def container_type = "cpu"
     def docker_binary = "docker"
     sh """
     ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_OPENMP=OFF
     """
     echo "Running Non-OpenMP C++ test..."
     sh """
     ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
     """
     deleteDir()
   }
 }
 
 def BuildCUDA(args) {
-  node('linux && cpu_build') {
+  node('linux && cpu') {
     unstash name: 'srcs'
     echo "Build with CUDA ${args.cuda_version}"
-    def container_type = GetCUDABuildContainerType(args.cuda_version)
+    def container_type = "gpu_build"
     def docker_binary = "docker"
     def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
-    def arch_flag = ""
-    if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) {
-      arch_flag = "-DGPU_COMPUTE_VER=75"
-    }
     sh """
-    ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON -DHIDE_CXX_SYMBOLS=ON ${arch_flag}
+    ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON
     ${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
-    ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux2010_x86_64
+    ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux1_x86_64
     """
-    echo 'Stashing Python wheel...'
-    stash name: "xgboost_whl_cuda${args.cuda_version}", includes: 'python-package/dist/*.whl'
-    if (args.cuda_version == ref_cuda_ver && (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release'))) {
-      echo 'Uploading Python wheel...'
-      path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
-      s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
-    }
-    echo 'Stashing C++ test executable (testxgboost)...'
-    stash name: "xgboost_cpp_tests_cuda${args.cuda_version}", includes: 'build/testxgboost'
+    // Stash wheel for CUDA 9.0 target
+    if (args.cuda_version == '9.0') {
+      echo 'Stashing Python wheel...'
+      stash name: 'xgboost_whl_cuda9', includes: 'python-package/dist/*.whl'
+      echo 'Stashing C++ test executable (testxgboost)...'
+      stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost'
+    }
    deleteDir()
  }
 }
 
-def BuildJVMPackagesWithCUDA(args) {
-  node('linux && mgpu') {
-    unstash name: 'srcs'
-    echo "Build XGBoost4J-Spark with Spark ${args.spark_version}, CUDA ${args.cuda_version}"
-    def container_type = "jvm_gpu_build"
-    def docker_binary = "nvidia-docker"
-    def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
-    def arch_flag = ""
-    if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) {
-      arch_flag = "-DGPU_COMPUTE_VER=75"
-    }
-    // Use only 4 CPU cores
-    def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='--cpuset-cpus 0-3'"
-    sh """
-    ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_jvm_packages.sh ${args.spark_version} -Duse.cuda=ON $arch_flag
-    """
-    echo "Stashing XGBoost4J JAR with CUDA ${args.cuda_version} ..."
-    stash name: 'xgboost4j_jar_gpu', includes: "jvm-packages/xgboost4j/target/*.jar,jvm-packages/xgboost4j-spark/target/*.jar,jvm-packages/xgboost4j-example/target/*.jar"
-    deleteDir()
-  }
-}
@@ -318,7 +244,7 @@ def BuildJVMPackages(args) {
   ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_packages.sh ${args.spark_version}
   """
   echo 'Stashing XGBoost4J JAR...'
-  stash name: 'xgboost4j_jar', includes: "jvm-packages/xgboost4j/target/*.jar,jvm-packages/xgboost4j-spark/target/*.jar,jvm-packages/xgboost4j-example/target/*.jar"
+  stash name: 'xgboost4j_jar', includes: 'jvm-packages/xgboost4j/target/*.jar,jvm-packages/xgboost4j-spark/target/*.jar,jvm-packages/xgboost4j-example/target/*.jar'
   deleteDir()
 }
@@ -332,46 +258,40 @@ def BuildJVMDoc() {
   sh """
   ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_doc.sh ${BRANCH_NAME}
   """
-  if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
-    echo 'Uploading doc...'
-    s3Upload file: "jvm-packages/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${BRANCH_NAME}.tar.bz2"
-  }
+  echo 'Uploading doc...'
+  s3Upload file: "jvm-packages/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${BRANCH_NAME}.tar.bz2"
   deleteDir()
 }
 
 def TestPythonCPU() {
   node('linux && cpu') {
-    unstash name: "xgboost_whl_cuda${ref_cuda_ver}"
+    unstash name: 'xgboost_whl_cuda9'
     unstash name: 'srcs'
     unstash name: 'xgboost_cli'
     echo "Test Python CPU"
     def container_type = "cpu"
     def docker_binary = "docker"
     sh """
     ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu
+    ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu-py35
     """
     deleteDir()
   }
 }
 
 def TestPythonGPU(args) {
-  def nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
-  def artifact_cuda_version = (args.artifact_cuda_version) ?: ref_cuda_ver
+  nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
   node(nodeReq) {
-    unstash name: "xgboost_whl_cuda${artifact_cuda_version}"
-    unstash name: "xgboost_cpp_tests_cuda${artifact_cuda_version}"
+    unstash name: 'xgboost_whl_cuda9'
    unstash name: 'srcs'
-    echo "Test Python GPU: CUDA ${args.host_cuda_version}"
+    echo "Test Python GPU: CUDA ${args.cuda_version}"
    def container_type = "gpu"
    def docker_binary = "nvidia-docker"
-    def docker_args = "--build-arg CUDA_VERSION=${args.host_cuda_version}"
+    def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
    if (args.multi_gpu) {
      echo "Using multiple GPUs"
-      // Allocate extra space in /dev/shm to enable NCCL
-      def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='--shm-size=4g'"
      sh """
-      ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu
+      ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu
      """
    } else {
      echo "Using a single GPU"
@@ -379,6 +299,13 @@ def TestPythonGPU(args) {
      ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh gpu
      """
    }
+    // For CUDA 10.0 target, run cuDF tests too
+    if (args.cuda_version == '10.0') {
+      echo "Running tests with cuDF..."
+      sh """
+      ${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh cudf
+      """
+    }
    deleteDir()
  }
 }
@@ -398,34 +325,21 @@ def TestCppRabit() {
 }
 
 def TestCppGPU(args) {
-  def nodeReq = 'linux && mgpu'
-  def artifact_cuda_version = (args.artifact_cuda_version) ?: ref_cuda_ver
+  nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
   node(nodeReq) {
-    unstash name: "xgboost_cpp_tests_cuda${artifact_cuda_version}"
+    unstash name: 'xgboost_cpp_tests'
    unstash name: 'srcs'
-    echo "Test C++, CUDA ${args.host_cuda_version}"
+    echo "Test C++, CUDA ${args.cuda_version}"
    def container_type = "gpu"
    def docker_binary = "nvidia-docker"
-    def docker_args = "--build-arg CUDA_VERSION=${args.host_cuda_version}"
-    sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost"
+    def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
+    if (args.multi_gpu) {
+      echo "Using multiple GPUs"
+      sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=*.MGPU_*"
+    } else {
+      echo "Using a single GPU"
+      sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=-*.MGPU_*"
+    }
    deleteDir()
  }
 }
 
-def CrossTestJVMwithJDKGPU(args) {
-  def nodeReq = 'linux && mgpu'
-  node(nodeReq) {
-    unstash name: "xgboost4j_jar_gpu"
-    unstash name: 'srcs'
-    if (args.spark_version != null) {
-      echo "Test XGBoost4J on a machine with JDK ${args.jdk_version}, Spark ${args.spark_version}, CUDA ${args.host_cuda_version}"
-    } else {
-      echo "Test XGBoost4J on a machine with JDK ${args.jdk_version}, CUDA ${args.host_cuda_version}"
-    }
-    def container_type = "gpu_jvm"
-    def docker_binary = "nvidia-docker"
-    def docker_args = "--build-arg CUDA_VERSION=${args.host_cuda_version}"
-    sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_jvm_gpu_cross.sh"
-    deleteDir()
-  }
-}
@@ -466,19 +380,3 @@ def TestR(args) {
   deleteDir()
   }
 }
-
-def DeployJVMPackages(args) {
-  node('linux && cpu') {
-    unstash name: 'srcs'
-    if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
-      echo 'Deploying to xgboost-maven-repo S3 repo...'
-      sh """
-      ${dockerRun} jvm docker tests/ci_build/deploy_jvm_packages.sh ${args.spark_version} 0
-      """
-      sh """
-      ${dockerRun} jvm_gpu_build docker --build-arg CUDA_VERSION=10.0 tests/ci_build/deploy_jvm_packages.sh ${args.spark_version} 1
-      """
-    }
-    deleteDir()
-  }
-}
```
Jenkinsfile-win64

```diff
@@ -10,25 +10,15 @@ def commit_id // necessary to pass a variable from one stage to another
 
 pipeline {
   agent none
 
   // Setup common job properties
   options {
     timestamps()
     timeout(time: 240, unit: 'MINUTES')
     buildDiscarder(logRotator(numToKeepStr: '10'))
     preserveStashes()
   }
 
   // Build stages
   stages {
-    stage('Jenkins Win64: Initialize') {
-      agent { label 'job_initializer' }
+    stage('Jenkins Win64: Get sources') {
+      agent { label 'win64 && build' }
       steps {
         script {
           checkoutSrcs()
           commit_id = "${GIT_COMMIT}"
         }
-        sh 'python3 tests/jenkins_get_approval.py'
         stash name: 'srcs'
         milestone ordinal: 1
       }
@@ -38,7 +28,7 @@
       steps {
         script {
           parallel ([
-            'build-win64-cuda10.1': { BuildWin64() }
+            'build-win64-cuda9.0': { BuildWin64() }
           ])
         }
         milestone ordinal: 2
@@ -49,7 +39,10 @@
      steps {
        script {
          parallel ([
-            'test-win64-cuda10.1': { TestWin64() },
+            'test-win64-cpu': { TestWin64CPU() },
+            'test-win64-gpu-cuda9.0': { TestWin64GPU(cuda_target: 'cuda9') },
+            'test-win64-gpu-cuda10.0': { TestWin64GPU(cuda_target: 'cuda10_0') },
+            'test-win64-gpu-cuda10.1': { TestWin64GPU(cuda_target: 'cuda10_1') }
          ])
        }
        milestone ordinal: 3
@@ -74,18 +67,14 @@ def checkoutSrcs() {
 }
 
 def BuildWin64() {
-  node('win64 && cuda10_unified') {
+  node('win64 && build') {
     unstash name: 'srcs'
     echo "Building XGBoost for Windows AMD64 target..."
     bat "nvcc --version"
-    def arch_flag = ""
-    if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) {
-      arch_flag = "-DGPU_COMPUTE_VER=75"
-    }
     bat """
     mkdir build
     cd build
-    cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON ${arch_flag}
+    cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON
     """
     bat """
     cd build
@@ -103,41 +92,50 @@
   """
   echo 'Stashing Python wheel...'
   stash name: 'xgboost_whl', includes: 'python-package/dist/*.whl'
-  if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
-    echo 'Uploading Python wheel...'
-    path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
-    s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
-  }
+  path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
+  s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
   echo 'Stashing C++ test executable (testxgboost)...'
   stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost.exe'
   stash name: 'xgboost_cli', includes: 'xgboost.exe'
   deleteDir()
 }
 
-def TestWin64() {
-  node('win64 && cuda10_unified') {
+def TestWin64CPU() {
+  node('win64 && cpu') {
     unstash name: 'srcs'
     unstash name: 'xgboost_whl'
-    unstash name: 'xgboost_cpp_tests'
-    echo "Test Win64"
-    bat "nvcc --version"
-    echo "Running C++ tests..."
-    bat "build\\testxgboost.exe"
-    echo "Installing Python dependencies..."
-    def env_name = 'win64_' + UUID.randomUUID().toString().replaceAll('-', '')
-    bat "conda env create -n ${env_name} --file=tests/ci_build/conda_env/win64_test.yml"
+    unstash name: 'xgboost_cli'
+    echo "Test Win64 CPU"
     echo "Installing Python wheel..."
+    bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
     bat """
-    conda activate ${env_name} && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
+    conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
     """
     echo "Running Python tests..."
-    bat "conda activate ${env_name} && python -m pytest -v -s -rxXs --fulltrace tests\\python"
-    bat """
-    conda activate ${env_name} && python -m pytest -v -s -rxXs --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu
-    """
-    bat "conda env remove --name ${env_name}"
+    bat "conda activate && python -m pytest -v -s --fulltrace tests\\python"
+    bat "conda activate && python -m pip uninstall -y xgboost"
     deleteDir()
   }
 }
 
+def TestWin64GPU(args) {
+  node("win64 && gpu && ${args.cuda_target}") {
+    unstash name: 'srcs'
+    unstash name: 'xgboost_whl'
+    unstash name: 'xgboost_cpp_tests'
+    echo "Test Win64 GPU (${args.cuda_target})"
+    bat "nvcc --version"
+    echo "Running C++ tests..."
+    bat "build\\testxgboost.exe"
+    echo "Installing Python wheel..."
+    bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
+    bat """
+    conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
+    """
+    echo "Running Python tests..."
+    bat """
+    conda activate && python -m pytest -v -s --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu
+    """
+    bat "conda activate && python -m pip uninstall -y xgboost"
+    deleteDir()
+  }
+}
```
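The BuildCPU stage in the Jenkinsfile above runs the C++ test suite under AddressSanitizer, LeakSanitizer, and UBSan (`-DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined"`). As a hedged illustration of the class of bug those runs catch, here is a tiny standalone C++ program (not XGBoost code) that ASan flags as a heap-buffer-overflow:

```cpp
#include <vector>

// Compile with: g++ -std=c++14 -g -fsanitize=address,leak,undefined overflow.cc
// ASan aborts at runtime with a heap-buffer-overflow report on the
// out-of-bounds write below; a plain build may silently corrupt memory.
int main() {
  std::vector<int> grad(8, 0);
  int* raw = grad.data();
  raw[8] = 1;  // one element past the end: caught by AddressSanitizer
  return 0;
}
```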
Makefile (142 changed lines)
```diff
@@ -1,3 +1,11 @@
+ifndef config
+ifneq ("$(wildcard ./config.mk)","")
+	config = config.mk
+else
+	config = make/config.mk
+endif
+endif
+
 ifndef DMLC_CORE
 	DMLC_CORE = dmlc-core
 endif
@@ -22,6 +30,16 @@ ifndef MAKE_OK
 endif
 $(warning MAKE [$(MAKE)] - $(if $(MAKE_OK),checked OK,PROBLEM))
 
+ifeq ($(OS), Windows_NT)
+	UNAME="Windows"
+else
+	UNAME=$(shell uname)
+endif
+
+include $(config)
+ifeq ($(USE_OPENMP), 0)
+	export NO_OPENMP = 1
+endif
 include $(DMLC_CORE)/make/dmlc.mk
 
 # set compiler defaults for OSX versus *nix
@@ -44,21 +62,75 @@ export CXX = g++
 endif
 endif
 
-export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++14 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
+export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
 export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS)
 CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include
 #java include path
 export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java
 
 ifeq ($(TEST_COVER), 1)
 	CFLAGS += -g -O0 -fprofile-arcs -ftest-coverage
 else
 	CFLAGS += -O3 -funroll-loops
 ifeq ($(USE_SSE), 1)
 	CFLAGS += -msse2
 endif
 endif
 
 ifndef LINT_LANG
 	LINT_LANG= "all"
 endif
 
 ifeq ($(UNAME), Windows)
 	XGBOOST_DYLIB = lib/xgboost.dll
 	JAVAINCFLAGS += -I${JAVA_HOME}/include/win32
 else
 ifeq ($(UNAME), Darwin)
 	XGBOOST_DYLIB = lib/libxgboost.dylib
 	CFLAGS += -fPIC
 else
 	XGBOOST_DYLIB = lib/libxgboost.so
 	CFLAGS += -fPIC
 endif
 endif
 
 ifeq ($(UNAME), Linux)
 	LDFLAGS += -lrt
 	JAVAINCFLAGS += -I${JAVA_HOME}/include/linux
 endif
 
 ifeq ($(UNAME), Darwin)
 	JAVAINCFLAGS += -I${JAVA_HOME}/include/darwin
 endif
 
 OPENMP_FLAGS =
 ifeq ($(USE_OPENMP), 1)
 	OPENMP_FLAGS = -fopenmp
 else
 	OPENMP_FLAGS = -DDISABLE_OPENMP
 endif
 CFLAGS += $(OPENMP_FLAGS)
 
 # specify tensor path
-.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck
+.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck java pylint
 
 all: lib/libxgboost.a $(XGBOOST_DYLIB) xgboost
 
 $(DMLC_CORE)/libdmlc.a: $(wildcard $(DMLC_CORE)/src/*.cc $(DMLC_CORE)/src/*/*.cc)
 	+ cd $(DMLC_CORE); "$(MAKE)" libdmlc.a config=$(ROOTDIR)/$(config); cd $(ROOTDIR)
 
 $(RABIT)/lib/$(LIB_RABIT): $(wildcard $(RABIT)/src/*.cc)
 	+ cd $(RABIT); "$(MAKE)" lib/$(LIB_RABIT) USE_SSE=$(USE_SSE); cd $(ROOTDIR)
 
 jvm: jvm-packages/lib/libxgboost4j.so
 
 SRC = $(wildcard src/*.cc src/*/*.cc)
 ALL_OBJ = $(patsubst src/%.cc, build/%.o, $(SRC))
 AMALGA_OBJ = amalgamation/xgboost-all0.o
 LIB_DEP = $(DMLC_CORE)/libdmlc.a $(RABIT)/lib/$(LIB_RABIT)
 ALL_DEP = $(filter-out build/cli_main.o, $(ALL_OBJ)) $(LIB_DEP)
 CLI_OBJ = build/cli_main.o
 include tests/cpp/xgboost_test.mk
 
 build/%.o: src/%.cc
 	@mkdir -p $(@D)
@@ -69,6 +141,27 @@ build/%.o: src/%.cc
 amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc
 	$(CXX) -c $(CFLAGS) $< -o $@
 
+# Equivalent to lib/libxgboost_all.so
+lib/libxgboost_all.so: $(AMALGA_OBJ) $(LIB_DEP)
+	@mkdir -p $(@D)
+	$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o %.a, $^) $(LDFLAGS)
+
 lib/libxgboost.a: $(ALL_DEP)
 	@mkdir -p $(@D)
 	ar crv $@ $(filter %.o, $?)
 
+lib/xgboost.dll lib/libxgboost.so lib/libxgboost.dylib: $(ALL_DEP)
+	@mkdir -p $(@D)
+	$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o %a, $^) $(LDFLAGS)
+
+jvm-packages/lib/libxgboost4j.so: jvm-packages/xgboost4j/src/native/xgboost4j.cpp $(ALL_DEP)
+	@mkdir -p $(@D)
+	$(CXX) $(CFLAGS) $(JAVAINCFLAGS) -shared -o $@ $(filter %.cpp %.o %.a, $^) $(LDFLAGS)
+
+
+xgboost: $(CLI_OBJ) $(ALL_DEP)
+	$(CXX) $(CFLAGS) -o $@ $(filter %.o %.a, $^) $(LDFLAGS)
+
 rcpplint:
 	python3 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src
 
@@ -79,6 +172,16 @@ lint: rcpplint
 	python-package/xgboost/src --pylint-rc ${PWD}/python-package/.pylintrc xgboost \
 	${LINT_LANG} include src python-package
 
+pylint:
+	flake8 --ignore E501 python-package
+	flake8 --ignore E501 tests/python
+
+test: $(ALL_TEST)
+	$(ALL_TEST)
+
+check: test
+	./tests/cpp/xgboost_test
+
 ifeq ($(TEST_COVER), 1)
 cover: check
 	@- $(foreach COV_OBJ, $(COVER_OBJ), \
@@ -99,9 +202,38 @@ clean_all: clean
 	cd $(DMLC_CORE); "$(MAKE)" clean; cd $(ROOTDIR)
 	cd $(RABIT); "$(MAKE)" clean; cd $(ROOTDIR)
 
 doxygen:
 	doxygen doc/Doxyfile
 
+# create standalone python tar file.
+pypack: ${XGBOOST_DYLIB}
+	cp ${XGBOOST_DYLIB} python-package/xgboost
+	cd python-package; tar cf xgboost.tar xgboost; cd ..
+
 # create pip source dist (sdist) pack for PyPI
 pippack: clean_all
-	cd python-package; python setup.py sdist; mv dist/*.tar.gz ..; cd ..
+	rm -rf xgboost-python
+	# remove symlinked directories in python-package/xgboost
+	rm -rf python-package/xgboost/lib
+	rm -rf python-package/xgboost/dmlc-core
+	rm -rf python-package/xgboost/include
+	rm -rf python-package/xgboost/make
+	rm -rf python-package/xgboost/rabit
+	rm -rf python-package/xgboost/src
+	cp -r python-package xgboost-python
+	cp -r CMakeLists.txt xgboost-python/xgboost/
+	cp -r cmake xgboost-python/xgboost/
+	cp -r plugin xgboost-python/xgboost/
+	cp -r make xgboost-python/xgboost/
+	cp -r src xgboost-python/xgboost/
+	cp -r tests xgboost-python/xgboost/
+	cp -r include xgboost-python/xgboost/
+	cp -r dmlc-core xgboost-python/xgboost/
+	cp -r rabit xgboost-python/xgboost/
+	# Use setup_pip.py instead of setup.py
+	mv xgboost-python/setup_pip.py xgboost-python/setup.py
+	# Build sdist tarball
+	cd xgboost-python; python setup.py sdist; mv dist/*.tar.gz ..; cd ..
 
 # Script to make a clean installable R package.
 Rpack: clean_all
@@ -122,9 +254,9 @@ Rpack: clean_all
 	cp -r dmlc-core/include xgboost/src/dmlc-core/include
 	cp -r dmlc-core/src xgboost/src/dmlc-core/src
 	cp ./LICENSE xgboost
-	# Modify PKGROOT in Makevars.in
+	# Modify PKGROOT in Makevars.in
 	cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.in
-	# Configure Makevars.win (Windows-specific Makevars, likely using MinGW)
+	# Configure Makevars.win (Windows-specific Makevars, likely using MinGW)
 	cp xgboost/src/Makevars.in xgboost/src/Makevars.win
 	cat xgboost/src/Makevars.in| sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.win
 	sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CXXFLAGS\)/g' xgboost/src/Makevars.win
```
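The shared-library targets above (`lib/libxgboost.so`, `lib/libxgboost.dylib`, `lib/xgboost.dll`) expose XGBoost's C API through unmangled C symbols. As a minimal sketch of consuming such a library dynamically (the symbol `XGBGetLastError` is part of the public C API; the library path is an assumption about your build layout):

```cpp
#include <dlfcn.h>
#include <cstdio>

// Minimal sketch: open libxgboost.so at runtime and resolve one C-API symbol.
int main() {
  void* handle = dlopen("lib/libxgboost.so", RTLD_LAZY);
  if (!handle) {
    std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }

  using GetLastErrorFn = const char* (*)();
  auto get_last_error =
      reinterpret_cast<GetLastErrorFn>(dlsym(handle, "XGBGetLastError"));
  if (get_last_error) std::printf("last error: \"%s\"\n", get_last_error());

  dlclose(handle);
  return 0;
}
```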
NEWS.md (508 changed lines)
@@ -3,514 +3,6 @@ XGBoost Change Log

This file records the changes in the xgboost library in reverse chronological order.

## v1.1.0 (2020.05.17)

### Better performance on multi-core CPUs (#5244, #5334, #5522)
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #5244 concludes the ongoing effort to improve performance scaling on multi-core CPUs, in particular Intel CPUs. Roadmap: #5104
* #5334 makes steps toward reducing memory consumption for the `hist` tree method on CPU.
* #5522 optimizes random number generation for data sampling.

### Deterministic GPU algorithm for regression and classification (#5361)
* The GPU algorithm for regression and classification tasks is now deterministic.
* Roadmap: #5023. Currently only single-GPU training is deterministic. Distributed training with multiple GPUs is not yet deterministic.

### Improve external memory support on GPUs (#5093, #5365)
* Starting from the 1.0.0 release, we added support for external memory on GPUs to enable training with larger datasets. Gradient-based sampling (#5093) speeds up the external memory algorithm by intelligently sampling a subset of the training data to copy into the GPU memory. [Learn more about out-of-core GPU gradient boosting.](https://arxiv.org/abs/2005.09148)
* GPU-side data sketching now works with data from external memory (#5365).

### Parameter validation: detection of unused or incorrect parameters (#5477, #5569, #5508)
* A misspelled training parameter is a common user mistake. In previous versions of XGBoost, misspelled parameters were silently ignored. Starting with the 1.0.0 release, XGBoost produces a warning message if there are any unused training parameters. The 1.1.0 release makes parameter validation available to the scikit-learn interface (#5477) and the R binding (#5569).
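
A minimal sketch of the new behavior (the toy data and the deliberate misspelling below are ours, not from the changelog):

```
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 4), np.random.randint(2, size=100)
dtrain = xgb.DMatrix(X, label=y)

# 'lamda' is a deliberate misspelling of 'lambda': with parameter
# validation, training emits a warning about the unused parameter
# instead of silently ignoring it.
bst = xgb.train({'objective': 'binary:logistic', 'lamda': 1.0},
                dtrain, num_boost_round=2)
```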

### Thread-safe, in-place prediction method (#5389, #5512)
* Previously, the prediction method was not thread-safe (#5339). This release adds a new API function `inplace_predict()` that is thread-safe. It is now possible to serve concurrent requests for prediction using a shared model object.
* It is now possible to compute prediction in-place for selected data formats (`numpy.ndarray` / `scipy.sparse.csr_matrix` / `cupy.ndarray` / `cudf.DataFrame` / `pd.DataFrame`) without creating a `DMatrix` object.
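
A minimal sketch (toy data ours): prediction runs directly on a `numpy` array, with no intermediate `DMatrix`:

```
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 4), np.random.rand(100)
bst = xgb.train({'objective': 'reg:squarederror'},
                xgb.DMatrix(X, label=y), num_boost_round=5)

# No DMatrix is constructed here, and the call is thread-safe, so a
# single Booster can serve concurrent prediction requests.
preds = bst.inplace_predict(np.random.rand(10, 4))
```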

### Addition of Accelerated Failure Time objective for survival analysis (#4763, #5473, #5486, #5552, #5553)
* Survival analysis (regression) models the time it takes for an event of interest to occur. The target label is potentially censored, i.e. the label is a range rather than a single number. We added a new objective `survival:aft` to support survival analysis, along with a new API for specifying ranged labels. Check out [the tutorial](https://xgboost.readthedocs.io/en/release_1.1.0/tutorials/aft_survival_analysis.html) and the [demos](https://github.com/dmlc/xgboost/tree/release_1.1.0/demo/aft_survival).
* GPU support is work in progress (#5714).
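
A minimal sketch of the ranged-label API, assuming `np.inf` in the upper bound marks right-censored rows (toy data ours; see the linked tutorial for the authoritative walkthrough):

```
import numpy as np
import xgboost as xgb

X = np.random.rand(100, 4)
# Each label is an interval [lower, upper]; np.inf marks right-censoring.
lower = np.random.rand(100) * 10
upper = np.where(np.random.rand(100) < 0.3, np.inf, lower + 1.0)

dtrain = xgb.DMatrix(X)
dtrain.set_float_info('label_lower_bound', lower)
dtrain.set_float_info('label_upper_bound', upper)

params = {'objective': 'survival:aft', 'aft_loss_distribution': 'normal'}
bst = xgb.train(params, dtrain, num_boost_round=10)
```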

### Improved installation experience on Mac OSX (#5597, #5602, #5606, #5701)
* It only takes two commands to install the XGBoost Python package: `brew install libomp` followed by `pip install xgboost`. The installed XGBoost will use all CPU cores. Even better, starting with this release, we distribute pre-compiled binary wheels targeting Mac OSX. Now the install command `pip install xgboost` finishes instantly, as it no longer compiles the C++ source of XGBoost. The last three Mac versions (High Sierra, Mojave, Catalina) are supported.
* R package: the 1.1.0 release fixes the error `Initializing libomp.dylib, but found libomp.dylib already initialized` (#5701).

### Ranking metrics are now accelerated on GPUs (#5380, #5387, #5398)

### GPU-side data matrix to ingest data directly from other GPU libraries (#5420, #5465)
* Previously, data on GPU memory had to be copied back to the main memory before it could be used by XGBoost. Starting with the 1.1.0 release, XGBoost provides a dedicated interface (`DeviceQuantileDMatrix`) so that it can ingest data from GPU memory directly. The result is that XGBoost interoperates better with GPU-accelerated data science libraries, such as cuDF, cuPy, and PyTorch.
* Set device in device dmatrix. (#5596)
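
A minimal sketch with cuPy (toy data ours; requires a CUDA-enabled build of XGBoost):

```
import cupy as cp
import xgboost as xgb

X, y = cp.random.rand(1000, 10), cp.random.rand(1000)

# The data stays on the GPU: DeviceQuantileDMatrix ingests the cuPy
# arrays directly instead of round-tripping through host memory.
dtrain = xgb.DeviceQuantileDMatrix(X, label=y)
bst = xgb.train({'tree_method': 'gpu_hist'}, dtrain, num_boost_round=10)
```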

### Robust model serialization with JSON (#5123, #5217)
* We continue efforts from the 1.0.0 release to adopt JSON as the format to save and load models robustly. Refer to the release note for 1.0.0 to learn more.
* It is now possible to store the internal configuration of the trained model (`Booster`) object in R as a JSON string (#5123, #5217).

### Improved integration with Dask
* Pass through `verbose` parameter for dask fit (#5413)
* Use `DMLC_TASK_ID`. (#5415)
* Order the prediction result. (#5416)
* Honor `nthreads` from dask worker. (#5414)
* Enable grid searching with scikit-learn. (#5417)
* Check non-equal when setting threads. (#5421)
* Accept other inputs for prediction. (#5428)
* Fix missing value for scikit-learn interface. (#5435)

### XGBoost4J-Spark: Check number of columns in the data iterator (#5202, #5303)
* Before, the native layer in XGBoost did not know the number of columns (features) ahead of time and had to guess it by counting the feature index when ingesting data. This method has a failure mode in the distributed setting: if the training data is highly sparse, some features may be completely missing in one or more worker partitions. Thus, one or more workers may deduce an incorrect data shape, leading to crashes or silently wrong models.
* Enforce the correct data shape by passing the number of columns explicitly from the JVM layer into the native layer.

### Major refactoring of the `DMatrix` class
* Continued from the 1.0.0 release.
* Remove update prediction cache from predictors. (#5312)
* Predict on Ellpack. (#5327)
* Partial rewrite EllpackPage (#5352)
* Use ellpack for prediction only when sparsepage doesn't exist. (#5504)
* RFC: #4354, Roadmap: #5143

### Breaking: XGBoost Python package now requires Pip 19.0 and higher (#5589)
* Your Linux machine may have an old version of Pip and may attempt to install a source package, leading to long installation time. This is because we are now using the `manylinux2010` tag in the binary wheel release. Ensure you have Pip 19.0 or newer by running `python3 -m pip -V` to check the version. Upgrade Pip with the command
```
python3 -m pip install --upgrade pip
```
Upgrading to the latest Pip allows us to depend on newer versions of system libraries. [TensorFlow](https://www.tensorflow.org/install/pip) also requires Pip 19.0+.

### Breaking: GPU algorithm now requires CUDA 10.0 and higher (#5649)
* CUDA 10.0 is necessary to make the GPU algorithm deterministic (#5361).

### Breaking: `silent` parameter is now removed (#5476)
* Please use `verbosity` instead.

### Breaking: Set `output_margin` to True for custom objectives (#5564)
* Custom objectives in both the R and Python interfaces now receive un-transformed (raw) prediction outputs.
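
A minimal Python sketch of the consequence (toy data ours): since the objective now sees raw margins, a logistic objective applies the sigmoid itself before computing gradients:

```
import numpy as np
import xgboost as xgb

def logistic_obj(preds, dtrain):
    # preds are raw (un-transformed) margins under the new behavior,
    # so apply the sigmoid before computing the gradient and hessian.
    labels = dtrain.get_label()
    p = 1.0 / (1.0 + np.exp(-preds))
    return p - labels, p * (1.0 - p)

X, y = np.random.rand(100, 4), np.random.randint(2, size=100)
dtrain = xgb.DMatrix(X, label=y)
bst = xgb.train({}, dtrain, num_boost_round=5, obj=logistic_obj)
```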

### Breaking: `Makefile` is now removed. We use CMake exclusively to build XGBoost (#5513)
* Exception: the R package uses Autotools, as the CRAN ecosystem has not yet adopted CMake widely.

### Breaking: `distcol` updater is now removed (#5507)
* The `distcol` updater has long been broken, and we currently lack the resources to write a working implementation from scratch.

### Deprecation notices
* **Python 3.5**. This release is the last release to support Python 3.5. The following release (1.2.0) will require Python 3.6.
* **Scala 2.11**. Currently XGBoost4J supports Scala 2.11. However, if a future release of XGBoost adopts Spark 3, it will not support Scala 2.11, as Spark 3 requires Scala 2.12+. We do not yet know which XGBoost release will adopt Spark 3.

### Known limitations
* (Python package) When early stopping is activated with `early_stopping_rounds` at training time, the prediction method (`xgb.predict()`) behaves in a surprising way. If XGBoost runs for M rounds and chooses iteration N (N < M) as the best iteration, then the prediction method will use M trees by default. To use the best iteration (N trees), users need to manually take the best iteration field `bst.best_iteration` and pass it as the `ntree_limit` argument to `xgb.predict()`, as sketched after this list. See #5209 and #4052 for additional context.
* GPU ranking objective is currently not deterministic (#5561).
* When the training parameter `reg_lambda` is set to zero, some leaf nodes may be assigned a NaN value. (See [discussion](https://discuss.xgboost.ai/t/still-getting-unexplained-nans-new-replication-code/1383/9).) For now, please set `reg_lambda` to a nonzero value.
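
A minimal sketch of the workaround from the first item above (toy data and the early-stopping setup are ours):

```
import numpy as np
import xgboost as xgb

X, y = np.random.rand(200, 4), np.random.rand(200)
dtrain = xgb.DMatrix(X[:150], label=y[:150])
dvalid = xgb.DMatrix(X[150:], label=y[150:])

bst = xgb.train({'objective': 'reg:squarederror'}, dtrain,
                num_boost_round=100, evals=[(dvalid, 'valid')],
                early_stopping_rounds=5)

# predict() uses all trees by default; following the note above, pass
# the best iteration explicitly so only the best trees are used.
preds = bst.predict(dvalid, ntree_limit=bst.best_iteration)
```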

### Community and Governance
* The XGBoost Project Management Committee (PMC) is pleased to announce a new committer: Egor Smirnov (@SmirnovEgorRu). He has led a major initiative to improve the performance of XGBoost on multi-core CPUs.

### Bug-fixes
* Improved compatibility with scikit-learn (#5255, #5505, #5538)
* Remove f-string, since it's not supported by Python 3.5 (#5330). Note that Python 3.5 support is deprecated and scheduled to be dropped in the upcoming release (1.2.0).
* Fix the pruner so that it doesn't prune the same branch twice (#5335)
* Enforce only major version in JSON model schema (#5336). Any major revision of the model schema would bump up the major version.
* Fix a small typo in sklearn.py that broke multiple eval metrics (#5341)
* Restore loading model from a memory buffer (#5360)
* Define lazy isinstance for Python compat (#5364)
* [R] fixed uses of `class()` (#5426)
* Force compressed buffer to be 4 bytes aligned, to keep cuda-memcheck happy (#5441)
* Remove warning for calling host function (`std::max`) on a GPU device (#5453)
* Fix uninitialized value bug in xgboost callback (#5463)
* Fix model dump in CLI (#5485)
* Fix out-of-bound array access in `WQSummary::SetPrune()` (#5493)
* Ensure that configured `dmlc/build_config.h` is picked up by Rabit and XGBoost, to fix build on Alpine (#5514)
* Fix a misspelled method, made in a git merge (#5509)
* Fix a bug in binary model serialization (#5532)
* Fix CLI model IO (#5535)
* Don't use `uint` for threads (#5542)
* Fix R interaction constraints to handle more than 100000 features (#5543)
* [jvm-packages] XGBoost Spark should deal with NaN when parsing evaluation output (#5546)
* GPU-side data sketching is now aware of query groups in learning-to-rank data (#5551)
* Fix DMatrix slicing for newly added fields (#5552)
* Fix configuration status with loading binary model (#5562)
* Fix build when OpenMP is disabled (#5566)
* R compatibility patches (#5577, #5600)
* gpu\_hist performance fixes (#5558)
* Don't set seed on CLI interface (#5563)
* [R] When serializing model, preserve model attributes related to early stopping (#5573)
* Avoid rabit calls in learner configuration (#5581)
* Hide C++ symbols in libxgboost.so when building Python wheel (#5590). This fixes apache/incubator-tvm#4953.
* Fix compilation on Mac OSX High Sierra (10.13) (#5597)
* Fix build on big endian CPUs (#5617)
* Resolve crash due to use of `vector<bool>::iterator` (#5642)
* Validate JSON model dump using JSON schema (#5660)

### Performance improvements
* Wide dataset quantile performance improvement (#5306)
* Reduce memory usage of GPU-side data sketching (#5407)
* Reduce span check overhead (#5464)
* Serialise booster after training to free up GPU memory (#5484)
* Use the maximum amount of GPU shared memory available to speed up the histogram kernel (#5491)
* Use non-synchronising scan in Thrust (#5560)
* Use `cudaDeviceGetAttribute()` instead of `cudaGetDeviceProperties()` for speed (#5570)

### API changes
* Support importing data from a Pandas SparseArray (#5431)
* `HostDeviceVector` (vector shared between CPU and GPU memory) now exposes `HostSpan` interface, to enable access on the CPU side with bound check (#5459)
* Accept other gradient types for `SplitEntry` (#5467)

### Usability Improvements, Documentation
* Add `JVM_CHECK_CALL` to prevent C++ exceptions from leaking into the JVM layer (#5199)
* Updated Windows build docs (#5283)
* Update affiliation of @hcho3 (#5292)
* Display Sponsor button, link to OpenCollective (#5325)
* Update docs for GPU external memory (#5332)
* Add link to GPU documentation (#5437)
* Small updates to GPU documentation (#5483)
* Edits on tutorial for XGBoost job on Kubernetes (#5487)
* Add reference to GPU external memory (#5490)
* Fix typos (#5346, #5371, #5384, #5399, #5482, #5515)
* Update Python doc (#5517)
* Add Neptune and Optuna to list of examples (#5528)
* Raise error if the number of data weights doesn't match the number of data sets (#5540)
* Add a note about GPU ranking (#5572)
* Clarify meaning of `training` parameter in the C API function `XGBoosterPredict()` (#5604)
* Better error handling for situations where existing trees cannot be modified (#5406, #5418). This feature is enabled when `process_type` is set to `update`.

### Maintenance: testing, continuous integration, build system
* Add C++ test coverage for data sketching (#5251)
* Ignore gdb\_history (#5257)
* Rewrite setup.py. (#5271, #5280)
* Use `scikit-learn` in extra dependencies (#5310)
* Add CMake option to build static library (#5397)
* [R] changed FindLibR to take advantage of CMake cache (#5427)
* [R] fixed inconsistency in R -e calls in FindLibR.cmake (#5438)
* Refactor tests with data generator (#5439)
* Resolve failing Travis CI (#5445)
* Update dmlc-core. (#5466)
* [CI] Use clang-tidy 10 (#5469)
* De-duplicate code for checking maximum number of nodes (#5497)
* [CI] Use Ubuntu 18.04 LTS in JVM CI, because 19.04 is EOL (#5537)
* [jvm-packages] [CI] Create a Maven repository to host SNAPSHOT JARs (#5533)
* [jvm-packages] [CI] Publish XGBoost4J JARs with Scala 2.11 and 2.12 (#5539)
* [CI] Use Vault repository to re-gain access to devtoolset-4 (#5589)

### Maintenance: Refactor code for legibility and maintainability
* Move prediction cache to Learner (#5220, #5302)
* Remove SimpleCSRSource (#5315)
* Refactor SparsePageSource, delete cache files after use (#5321)
* Remove unnecessary DMatrix methods (#5324)
* Split up `LearnerImpl` (#5350)
* Move segment sorter to common (#5378)
* Move thread local entry into Learner (#5396)
* Split up test helpers header (#5455)
* Requires setting leaf stat when expanding tree (#5501)
* Purge device\_helpers.cuh (#5534)
* Use thrust functions instead of custom functions (#5544)

### Acknowledgement
**Contributors**: Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), @ShvetsKS, Egor Smirnov (@SmirnovEgorRu), Andrew Kane (@ankane), Avinash Barnwal (@avinashbarnwal), Bart Broere (@bartbroere), Andy Adinets (@canonizer), Chen Qin (@chenqin), Daiki Katsuragawa (@daikikatsuragawa), David Díaz Vico (@daviddiazvico), Darius Kharazi (@dkharazi), Darby Payne (@dpayne), Jason E. Aten, Ph.D. (@glycerine), Philip Hyunsu Cho (@hcho3), James Lamb (@jameslamb), Jan Borchmann (@jborchma), Kamil A. Kaczmarek (@kamil-kaczmarek), Melissa Kohl (@mjkohl32), Nicolas Scozzaro (@nscozzaro), Paul Kaefer (@paulkaefer), Rong Ou (@rongou), Samrat Pandiri (@samratp), Sriram Chandramouli (@sriramch), Yuan Tang (@terrytangyuan), Jiaming Yuan (@trivialfis), Liang-Chi Hsieh (@viirya), Bobby Wang (@wbo4958), Zhang Zhang (@zhangzhang10)

**Reviewers**: Nan Zhu (@CodingCat), @LeZhengThu, Rory Mitchell (@RAMitchell), @ShvetsKS, Egor Smirnov (@SmirnovEgorRu), Steve Bronder (@SteveBronder), Nikita Titov (@StrikerRUS), Andrew Kane (@ankane), Avinash Barnwal (@avinashbarnwal), @brydag, Andy Adinets (@canonizer), Chandra Shekhar Reddy (@chandrureddy), Chen Qin (@chenqin), Codecov (@codecov-io), David Díaz Vico (@daviddiazvico), Darby Payne (@dpayne), Jason E. Aten, Ph.D. (@glycerine), Philip Hyunsu Cho (@hcho3), James Lamb (@jameslamb), @johnny-cat, Mu Li (@mli), Mate Soos (@msoos), @rnyak, Rong Ou (@rongou), Sriram Chandramouli (@sriramch), Toby Dylan Hocking (@tdhock), Yuan Tang (@terrytangyuan), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Liang-Chi Hsieh (@viirya), Bobby Wang (@wbo4958)

## v1.0.0 (2020.02.19)
This release marks a major milestone for the XGBoost project.

### Apache-style governance, contribution policy, and semantic versioning (#4646, #4659)
* Starting with the 1.0.0 release, the XGBoost Project is adopting Apache-style governance. The full community guideline is [available in the doc website](https://xgboost.readthedocs.io/en/release_1.0.0/contrib/community.html). Note that we now have a Project Management Committee (PMC) that stewards the project on a long-term basis. The PMC is also entrusted to run and fund the project's continuous integration (CI) infrastructure (https://xgboost-ci.net).
* We also adopt [semantic versioning](https://semver.org/). See [our release versioning policy](https://xgboost.readthedocs.io/en/release_1.0.0/contrib/release.html).

### Better performance scaling for multi-core CPUs (#4502, #4529, #4716, #4851, #5008, #5107, #5138, #5156)
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). Previous effort #4529 was replaced with a series of pull requests (#5107, #5138, #5156) aimed at achieving the same performance benefits while keeping the C++ codebase legible. The latest performance benchmark results show [up to 5x speedup on Intel CPUs with many cores](https://github.com/dmlc/xgboost/pull/5156#issuecomment-580024413). Note: #5244, which concludes the effort, will become part of the upcoming release 1.1.0.

### Improved installation experience on Mac OSX (#4672, #5074, #5080, #5146, #5240)
* It used to be quite complicated to install XGBoost on Mac OSX. XGBoost uses OpenMP to distribute work among multiple CPU cores, and Mac's default C++ compiler (Apple Clang) does not come with OpenMP. The existing work-around (using another C++ compiler) was complex and prone to failure with cryptic diagnostics (#4933, #4949, #4969).
* Now it only takes two commands to install XGBoost: `brew install libomp` followed by `pip install xgboost`. The installed XGBoost will use all CPU cores.
* Even better, XGBoost is now available from Homebrew: `brew install xgboost`. See Homebrew/homebrew-core#50467.
* Previously, if you installed the XGBoost R package using the command `install.packages('xgboost')`, it could only use a single CPU core and you would experience slow training performance. With the 1.0.0 release, the R package will use all CPU cores out of the box.

### Distributed XGBoost now available on Kubernetes (#4621, #4939)
* Check out the [tutorial for setting up distributed XGBoost on a Kubernetes cluster](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/kubernetes.html).

### Ruby binding for XGBoost (#4856)

### New Native Dask interface for multi-GPU and multi-node scaling (#4473, #4507, #4617, #4819, #4907, #4914, #4941, #4942, #4951, #4973, #5048, #5077, #5144, #5270)
* XGBoost now integrates seamlessly with [Dask](https://dask.org/), a lightweight distributed framework for data processing. Together with the first-class support for cuDF data frames (see below), it is now easier than ever to create an end-to-end data pipeline running on one or more NVIDIA GPUs.
* Multi-GPU training with Dask is now up to 20% faster than the previous release (#4914, #4951).

### First-class support for cuDF data frames and cuPy arrays (#4737, #4745, #4794, #4850, #4891, #4902, #4918, #4927, #4928, #5053, #5189, #5194, #5206, #5219, #5225)
* [cuDF](https://github.com/rapidsai/cudf) is a data frame library for loading and processing tabular data on NVIDIA GPUs. It provides a Pandas-like API.
* [cuPy](https://github.com/cupy/cupy) implements a NumPy-compatible multi-dimensional array on NVIDIA GPUs.
* Now users can keep the data on the GPU memory throughout the end-to-end data pipeline, obviating the need for copying data between the main memory and GPU memory.
* XGBoost can accept any data structure that exposes the `__array_interface__` signature, opening the way to supporting other columnar formats that are compatible with Apache Arrow.

### [Feature interaction constraint](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/feature_interaction_constraint.html) is now available with `approx` and `gpu_hist` algorithms (#4534, #4587, #4596, #5034).
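
A minimal sketch (the feature groups are ours): splits may combine features only within a listed group, here with the `approx` method:

```
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 4), np.random.rand(100)
dtrain = xgb.DMatrix(X, label=y)

params = {
    'tree_method': 'approx',  # 'gpu_hist' is supported as well
    # Features 0 and 1 may interact, as may 2 and 3, but no split
    # path may mix the two groups.
    'interaction_constraints': '[[0, 1], [2, 3]]',
}
bst = xgb.train(params, dtrain, num_boost_round=10)
```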

### Learning to rank is now GPU accelerated (#4873, #5004, #5129)
* Supported ranking objectives: NDCG, MAP, pairwise.
* [Up to 2x improved training performance on GPUs](https://devblogs.nvidia.com/learning-to-rank-with-xgboost-and-gpu/).

### Enable `gamma` parameter for GPU training (#4874, #4953)
* The `gamma` parameter specifies the minimum loss reduction required to add a new split in a tree. A larger value for `gamma` has the effect of pre-pruning the tree, by making it harder to add splits.
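
A minimal sketch (toy data and values ours): with `gamma` set, a candidate split is kept only if it reduces the loss by at least that amount, now honored on GPU as well:

```
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 4), np.random.rand(100)
dtrain = xgb.DMatrix(X, label=y)

params = {
    'tree_method': 'gpu_hist',
    'gamma': 1.0,  # minimum loss reduction required for each new split
}
bst = xgb.train(params, dtrain, num_boost_round=10)
```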

### External memory for GPU training (#4486, #4526, #4747, #4833, #4879, #5014)
* It is now possible to use NVIDIA GPUs even when the size of training data exceeds the available GPU memory. Note that the external memory support for GPU is still experimental. #5093 will further improve performance and will become part of the upcoming release 1.1.0.
* RFC for enabling external memory with GPU algorithms: #4357

### Improve Scikit-Learn interface (#4558, #4842, #4929, #5049, #5151, #5130, #5227)
* Many users of XGBoost enjoy the convenience and breadth of the Scikit-Learn ecosystem. In this release, we revise the Scikit-Learn API of XGBoost (`XGBRegressor`, `XGBClassifier`, and `XGBRanker`) to achieve feature parity with the traditional XGBoost interface (`xgboost.train()`).
* Insert check to validate data shapes.
* Produce an error message if `eval_set` is not a tuple. An error message is better than silently crashing.
* Allow using `numpy.RandomState` object.
* Add `n_jobs` as an alias of `nthread`.
* Roadmap: #5152

### XGBoost4J-Spark: Redesigning checkpointing mechanism
* RFC is available at #4786
* Clean up checkpoint file after a successful training job (#4754): The current implementation in XGBoost4J-Spark does not clean up the checkpoint file after a successful training job. If the user runs another job with the same checkpointing directory, she will get a wrong model because the second job will re-use the checkpoint file left over from the first job. To prevent this scenario, we propose to always clean up the checkpoint file after every successful training job.
* Avoid Multiple Jobs for Checkpointing (#5082): The current method for checkpointing is to collect the booster produced at the last iteration of each checkpoint interval to the Driver and persist it in HDFS. The major issue with this approach is that it needs to re-perform the data preparation for training if the user did not choose to cache the training dataset. To avoid re-performing data prep, we build external-memory checkpointing in the XGBoost4J layer as well.
* Enable deterministic repartitioning when checkpoint is enabled (#4807): The distributed algorithm for gradient boosting assumes a fixed partition of the training data between multiple iterations. In previous versions, there was no guarantee that the data partition would stay the same, especially when a worker goes down and some data had to be recovered from a previous checkpoint. In this release, we make the data partition deterministic by using the hash value of each data row in computing the partition.

### XGBoost4J-Spark: handle errors thrown by the native code (#4560)
* All core logic of XGBoost is written in C++, so XGBoost4J-Spark internally uses the C++ code via the Java Native Interface (JNI). #4560 adds proper error handling for any errors or exceptions arising from the C++ code, so that the XGBoost Spark application can be torn down in an orderly fashion.

### XGBoost4J-Spark: Refine method to count the number of alive cores (#4858)
* The `SparkParallelismTracker` class ensures that a sufficient number of executor cores are alive. To that end, it is important to query the number of alive cores reliably.

### XGBoost4J: Add `BigDenseMatrix` to store more than `Integer.MAX_VALUE` elements (#4383)

### Robust model serialization with JSON (#4632, #4708, #4739, #4868, #4936, #4945, #4974, #5086, #5087, #5089, #5091, #5094, #5110, #5111, #5112, #5120, #5137, #5218, #5222, #5236, #5245, #5248, #5281)
* In this release, we introduce experimental support for using [JSON](https://www.json.org/json-en.html) to serialize (save/load) XGBoost models and related hyperparameters for training. We would like to eventually replace the old binary format with JSON, since it is an open format and parsers are available in many programming languages and platforms. See [the documentation for model I/O using JSON](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/saving_model.html). #3980 explains why JSON was chosen over other alternatives.
* To maximize interoperability and compatibility of the serialized models, we now split serialization into two parts (#4855):
  1. Model, e.g. decision trees and strictly related metadata like `num_features`.
  2. Internal configuration, consisting of training parameters and other configurable parameters. For example, `max_delta_step`, `tree_method`, `objective`, `predictor`, `gpu_id`.

  Previously, users often ran into issues where the model file produced by one machine could not load or run on another machine. For example, models trained using a machine with an NVIDIA GPU could not run on another machine without a GPU (#5291, #5234). The reason is that the old binary format saved some internal configuration that was not universally applicable to all machines, e.g. `predictor='gpu_predictor'`.

  Now, the model saving function (`Booster.save_model()` in Python) will save only the model, without internal configuration. This guarantees that your model file can be used anywhere. Internal configuration will be serialized in limited circumstances such as:
  * Multiple nodes in a distributed system exchange model details over the network.
  * Model checkpointing, to recover from possible crashes.

  This work proved to be useful for parameter validation as well (see below).
* Starting with the 1.0.0 release, we will use semantic versioning to indicate whether the model produced by one version of XGBoost would be compatible with another version of XGBoost. Any change in the major version indicates a breaking change in the serialization format.
* We now provide a robust method to save and load scikit-learn related attributes (#5245). Previously, we used Python pickle to save Python attributes related to `XGBClassifier`, `XGBRegressor`, and `XGBRanker` objects. The attributes are necessary to properly interact with scikit-learn. See #4639 for more details. The use of pickling hampered interoperability, as a pickle from one machine may not necessarily work on another machine. Starting with this release, we use an alternative method to serialize the scikit-learn related attributes. The use of Python pickle is now discouraged (#5236, #5281).
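
A minimal sketch of the model/configuration split (toy data ours; we assume the experimental JSON format is selected by the `.json` file extension):

```
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 4), np.random.rand(100)
bst = xgb.train({'max_depth': 3}, xgb.DMatrix(X, label=y), num_boost_round=5)

# Saving with a .json extension selects the JSON format. Only the model
# (trees plus minimal metadata) is written, not the machine-specific
# internal configuration such as predictor='gpu_predictor'.
bst.save_model('model.json')

bst2 = xgb.Booster()
bst2.load_model('model.json')
```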

### Parameter validation: detection of unused or incorrect parameters (#4553, #4577, #4738, #4801, #4961, #5101, #5157, #5167, #5256)
* A misspelled training parameter is a common user mistake. In previous versions of XGBoost, misspelled parameters were silently ignored. Starting with the 1.0.0 release, XGBoost will produce a warning message if there are any unused training parameters. Currently, parameter validation is available to R users and Python XGBoost API users. We are working to extend its support to scikit-learn users.
* Configuration steps now have well-defined semantics (#4542, #4738), so we know exactly where and how the internal configurable parameters are changed.
* The user can now use the `save_config()` function to inspect all (used) training parameters. This is helpful for debugging model performance.
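
A minimal sketch (toy data ours): `save_config()` returns the booster's internal configuration as a JSON string, handy for auditing which parameters were actually in effect:

```
import json
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 4), np.random.rand(100)
bst = xgb.train({'max_depth': 3}, xgb.DMatrix(X, label=y), num_boost_round=2)

# The returned string is JSON; parse it to inspect the effective
# configuration, including defaults the user never set explicitly.
config = json.loads(bst.save_config())
print(json.dumps(config, indent=2))
```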

### Allow individual workers to recover from faults (#4808, #4966)
* Status quo: if a worker fails, all workers are shut down and restarted, and learning resumes from the last checkpoint. This involves requesting resources from the scheduler (e.g. Spark) and shuffling all the data again from scratch. Both of these operations can be quite costly and block training for extended periods of time, especially if the training data is big and the number of worker nodes is in the hundreds.
* The proposed solution is to recover the single node that failed, instead of shutting down all workers. The rest of the cluster waits until the single failed worker is bootstrapped and catches up with the rest.
* See roadmap at #4753. Note that this is work in progress. In particular, the feature is not yet available from XGBoost4J-Spark.

### Accurate prediction for DART models
* Use DART tree weights when computing SHAPs (#5050)
* Don't drop trees during DART prediction by default (#5115)
* Fix DART prediction in R (#5204)

### Make external memory more robust
* Fix issues with training with external memory on cpu (#4487)
* Fix crash with approx tree method on cpu (#4510)
* Fix external memory race in `exact` (#4980). Note: `dmlc::ThreadedIter` is not actually thread-safe. We would like to re-design it in the long term.

### Major refactoring of the `DMatrix` class (#4686, #4744, #4748, #5044, #5092, #5108, #5188, #5198)
* Goal 1: improve performance and reduce memory consumption. Right now, if the user trains a model with a NumPy array as training data, the array gets copied 2-3 times before training begins. We'd like to reduce duplication of the data matrix.
* Goal 2: Expose a common interface to external data, unify the way DMatrix objects are constructed and simplify the process of adding new external data sources. This work is essential for ingesting cuPy arrays.
* Goal 3: Handle missing values consistently.
* RFC: #4354, Roadmap: #5143
* This work is also relevant to external memory support on GPUs.

### Breaking: XGBoost Python package now requires Python 3.5 or newer (#5021, #5274)
* Python 3.4 reached its end-of-life on March 16, 2019, so we now require Python 3.5 or newer.

### Breaking: GPU algorithm now requires CUDA 9.0 and higher (#4527, #4580)

### Breaking: `n_gpus` parameter removed; multi-GPU training now requires a distributed framework (#4579, #4749, #4773, #4810, #4867, #4908)
* #4531 proposed removing support for single-process multi-GPU training. Contributors would focus on multi-GPU support through distributed frameworks such as Dask and Spark, where the framework would be expected to assign a worker process for each GPU independently. By delegating GPU management and data movement to the distributed framework, we can greatly simplify the core XGBoost codebase, make multi-GPU training more robust, and reduce the burden for future development.

### Breaking: Some deprecated features have been removed
* ``gpu_exact`` training method (#4527, #4742, #4777). Use ``gpu_hist`` instead.
* ``learning_rates`` parameter in Python (#5155). Use the callback API instead.
* ``num_roots`` (#5059, #5165), since the current training code always uses a single root node.
* GPU-specific objectives (#4690), such as `gpu:reg:linear`. Use objectives without the `gpu:` prefix; the GPU will be used automatically if your machine has one.

### Breaking: the C API function `XGBoosterPredict()` now asks for an extra parameter `training`.

### Breaking: We now use CMake exclusively to build XGBoost. `Makefile` is being sunset.
* Exception: the R package uses Autotools, as the CRAN ecosystem has not yet adopted CMake widely.

### Performance improvements
* Smarter choice of histogram construction for distributed `gpu_hist` (#4519)
* Optimizations for quantization on device (#4572)
* Introduce caching memory allocator to avoid latency associated with GPU memory allocation (#4554, #4615)
* Optimize the initialization stage of the CPU `hist` algorithm for sparse datasets (#4625)
* Prevent unnecessary data copies from GPU memory to the host (#4795)
* Improve operation efficiency for single prediction (#5016)
* Group builder modified for incremental building, to speed up building large `DMatrix` (#5098)

### Bug-fixes
* Eliminate `FutureWarning: Series.base is deprecated` (#4337)
* Ensure pandas DataFrame column names are treated as strings in type error message (#4481)
* [jvm-packages] Add back `reg:linear` for scala, as it is only deprecated and not meant to be removed yet (#4490)
* Fix library loading for Cygwin users (#4499)
* Fix prediction from loaded pickle (#4516)
* Enforce exclusion between `pred_interactions=True` and `pred_contribs=True` (#4522)
* Do not return dangling reference to local `std::string` (#4543)
* Set the appropriate device before freeing device memory (#4566)
* Mark `SparsePageDmatrix` destructor default. (#4568)
* Choose the appropriate tree method only when the tree method is 'auto' (#4571)
* Fix `benchmark_tree.py` (#4593)
* [jvm-packages] Fix silly bug in feature scoring (#4604)
* Fix GPU predictor when the test data matrix has a different number of features than the training data matrix used to train the model (#4613)
* Fix external memory for get column batches. (#4622)
* [R] Use built-in label when xgb.DMatrix is given to xgb.cv() (#4631)
* Fix early stopping in the Python package (#4638)
* Fix AUC error in distributed mode caused by imbalanced dataset (#4645, #4798)
* [jvm-packages] Expose `setMissing` method in `XGBoostClassificationModel` / `XGBoostRegressionModel` (#4643)
* Remove initializing stringstream reference. (#4788)
* [R] `xgb.get.handle` now checks all classes listed for `object` (#4800)
* Do not use `gpu_predictor` unless data comes from GPU (#4836)
* Fix data loading (#4862)
* Workaround `isnan` across different environments. (#4883)
* [jvm-packages] Handle Long-type parameter (#4885)
* Don't `set_params` at the end of `set_state` (#4947). Ensure that the model does not change after pickling and unpickling multiple times.
* C++ exceptions should not crash OpenMP loops (#4960)
* Fix `usegpu` flag in DART. (#4984)
* Run training with empty `DMatrix` (#4990, #5159)
* Ensure that no two processes can use the same GPU (#4990)
* Fix repeated split and 0 cover nodes (#5010)
* Reset histogram hit counter between multiple data batches (#5035)
* Fix `feature_name` created from an Int64Index dataframe. (#5081)
* Don't use 0 for "fresh leaf" (#5084)
* Throw error when user attempts to use multi-GPU training and XGBoost has not been compiled with NCCL (#5170)
* Fix metric name loading (#5122)
* Quick fix for memory leak in CPU `hist` algorithm (#5153)
* Fix wrapping GPU ID and prevent data copying (#5160)
* Fix signature of Span constructor (#5166)
* Lazy initialization of device vector, so that XGBoost compiled with CUDA can run on a machine without any GPU (#5173)
* Model loading should not change system locale (#5314)
* Distributed training jobs would sometimes hang; revert Rabit to fix this regression (dmlc/rabit#132, #5237)

### API changes
* Add support for cross-validation using query ID (#4474)
* Enable feature importance property for DART model (#4525)
* Add `rmsle` metric and `reg:squaredlogerror` objective (#4541)
* All objective and evaluation metrics are now exposed to JVM packages (#4560)
* `dump_model()` and `get_dump()` now support exporting in GraphViz language (#4602)
* Support metrics `ndcg-` and `map-` (#4635)
* [jvm-packages] Allow chaining prediction (transform) in XGBoost4J-Spark (#4667)
* [jvm-packages] Add option to bypass missing value check in the Spark layer (#4805). Only use this option if you know what you are doing.
* [jvm-packages] Add public group getter (#4838)
* `XGDMatrixSetGroup` C API is now deprecated (#4864). Use `XGDMatrixSetUIntInfo` instead.
* [R] Added new `train_folds` parameter to `xgb.cv()` (#5114)
* Ingest meta information from Pandas DataFrame, such as data weights (#5216)

### Maintenance: Refactor code for legibility and maintainability
* De-duplicate GPU parameters (#4454)
* Simplify INI-style config reader using C++11 STL (#4478, #4521)
* Refactor histogram building code for `gpu_hist` (#4528)
* Overload device memory allocator, to enable instrumentation for compiling memory usage statistics (#4532)
* Refactor out row partitioning logic from `gpu_hist` (#4554)
* Remove an unused variable (#4588)
* Implement tree model dump with code generator, to de-duplicate code for generating dumps in 3 different formats (#4602)
* Remove `RowSet` class which is no longer being used (#4697)
* Remove some unused functions as reported by cppcheck (#4743)
* Mimic CUDA assert output in Span check (#4762)
* [jvm-packages] Refactor `XGBoost.scala` to put all params processing in one place (#4815)
* Add some comments for GPU row partitioner (#4832)
* Span: use `size_t` for index_type, add `front` and `back`. (#4935)
* Remove dead code in `exact` algorithm (#5034, #5105)
* Unify integer types used for row and column indices (#5034)
* Extract feature interaction constraint from `SplitEvaluator` class. (#5034)
* [Breaking] De-duplicate parameters and docstrings in the constructors of Scikit-Learn models (#5130)
* Remove benchmark code from GPU tests (#5141)
* Clean up Python 2 compatibility code. (#5161)
* Extensible binary serialization format for `DMatrix::MetaInfo` (#5187). This will be useful for implementing censored labels for survival analysis applications.
* Cleanup clang-tidy warnings. (#5247)

### Maintenance: testing, continuous integration, build system
* Use `yaml.safe_load` instead of `yaml.load`. (#4537)
* Ensure GCC is at least 5.x (#4538)
* Remove all mention of `reg:linear` from tests (#4544)
* [jvm-packages] Upgrade to Scala 2.12 (#4574)
* [jvm-packages] Update kryo dependency to 2.22 (#4575)
* [CI] Specify account ID when logging into ECR Docker registry (#4584)
* Use Sphinx 2.1+ to compile documentation (#4609)
* Make Pandas optional for running Python unit tests (#4620)
* Fix spark tests on machines with many cores (#4634)
* [jvm-packages] Update local dev build process (#4640)
* Add optional dependencies to setup.py (#4655)
* [jvm-packages] Fix maven warnings (#4664)
* Remove extraneous files from the R package, to comply with CRAN policy (#4699)
* Remove VC-2013 support, since it is not C++11 compliant (#4701)
* [CI] Fix broken installation of Pandas (#4704, #4722)
* [jvm-packages] Clean up temporary files after running tests (#4706)
* Specify version macro in CMake. (#4730)
* Include dmlc-tracker into XGBoost Python package (#4731)
* [CI] Use long key ID for Ubuntu repository fingerprints. (#4783)
* Remove plugin, cuda related code in automake & autoconf files (#4789)
* Skip related tests when scikit-learn is not installed. (#4791)
* Ignore vscode and clion files (#4866)
* Use bundled Google Test by default (#4900)
* [CI] Raise timeout threshold in Jenkins (#4938)
* Copy CMake parameter from dmlc-core. (#4948)
* Set correct file permission. (#4964)
* [CI] Update lint configuration to support latest pylint convention (#4971)
* [CI] Upload nightly builds to S3 (#4976, #4979)
* Add asan.so.5 to cmake script. (#4999)
* [CI] Fix Travis tests. (#5062)
* [CI] Locate vcomp140.dll from System32 directory (#5078)
* Implement training observer to dump internal states of objects (#5088). This will be useful for debugging.
* Fix visual studio output library directories (#5119)
* [jvm-packages] Comply with scala style convention + fix broken unit test (#5134)
* [CI] Repair download URL for Maven 3.6.1 (#5139)
* Don't use modernize-use-trailing-return-type in clang-tidy. (#5169)
* Explicitly use UTF-8 codepage when using MSVC (#5197)
* Add CMake option to run Undefined Behavior Sanitizer (UBSan) (#5211)
* Make some GPU tests deterministic (#5229)
* [R] Robust endian detection in CRAN xgboost build (#5232)
* Support FreeBSD (#5233)
* Make `pip install xgboost*.tar.gz` work by fixing build-python.sh (#5241)
* Fix compilation error due to 64-bit integer narrowing to `size_t` (#5250)
* Remove use of `std::cout` from R package, to comply with CRAN policy (#5261)
* Update DMLC-Core submodule (#4674, #4688, #4726, #4924)
* Update Rabit submodule (#4560, #4667, #4718, #4808, #4966, #5237)

### Usability Improvements, Documentation
* Add Random Forest API to Python API doc (#4500)
* Fix Python demo and doc. (#4545)
* Remove doc about not supporting cuda 10.1 (#4578)
* Address some sphinx warnings and errors, add doc for building doc. (#4589)
* Add instruction to run formatting checks locally (#4591)
* Fix docstring for `XGBModel.predict()` (#4592)
* Doc and demo for customized metric and objective (#4598, #4608)
* Add to documentation how to run tests locally (#4610)
* Empty evaluation list in early stopping should produce meaningful error message (#4633)
* Fixed year to 2019 in conf.py, helpers.h and LICENSE (#4661)
* Minor updates to links and grammar (#4673)
* Remove `silent` in doc (#4689)
* Remove old Python trouble shooting doc (#4729)
* Add `os.PathLike` support for file paths to DMatrix and Booster Python classes (#4757)
* Update XGBoost4J-Spark doc (#4804)
* Regular formatting for evaluation metrics (#4803)
* [jvm-packages] Refine documentation for handling missing values in XGBoost4J-Spark (#4805)
* Monitor for distributed environment (#4829). This is useful for identifying performance bottlenecks.
* Add check for length of weights and produce a good error message (#4872)
* Fix DMatrix doc (#4884)
* Export C++ headers in CMake installation (#4897)
* Update license year in README.md to 2019 (#4940)
* Fix incorrectly displayed Note in the doc (#4943)
* Follow PEP 257 Docstring Conventions (#4959)
* Document minimum version required for Google Test (#5001)
* Add better error message for invalid feature names (#5024)
* Some guidelines on device memory usage (#5038)
* [doc] Some notes for external memory. (#5065)
* Update document for `tree_method` (#5106)
* Update demo for ranking. (#5154)
* Add new lines for Spark XGBoost missing values section (#5180)
* Fix simple typo: utilty -> utility (#5182)
* Update R doc by roxygen2 (#5201)
* [R] Direct user to use `set.seed()` instead of setting `seed` parameter (#5125)
* Add Optuna badge to `README.md` (#5208)
* Fix compilation error in `c-api-demo.c` (#5215)

### Acknowledgement
**Contributors**: Nan Zhu (@CodingCat), Crissman Loomis (@Crissman), Cyprien Ricque (@Cyprien-Ricque), Evan Kepner (@EvanKepner), K.O. (@Hi-king), KaiJin Ji (@KerryJi), Peter Badida (@KeyWeeUsr), Kodi Arfer (@Kodiologist), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Jacob Kim (@TheJacobKim), Vibhu Jawa (@VibhuJawa), Marcos (@astrowonk), Andy Adinets (@canonizer), Chen Qin (@chenqin), Christopher Cowden (@cowden), @cpfarrell, @david-cortes, Liangcai Li (@firestarman), @fuhaoda, Philip Hyunsu Cho (@hcho3), @here-nagini, Tong He (@hetong007), Michal Kurka (@michalkurka), Honza Sterba (@honzasterba), @iblumin, @koertkuipers, mattn (@mattn), Mingjie Tang (@merlintang), OrdoAbChao (@mglowacki100), Matthew Jones (@mt-jones), mitama (@nigimitama), Nathan Moore (@nmoorenz), Daniel Stahl (@phillyfan1138), Michaël Benesty (@pommedeterresautee), Rong Ou (@rongou), Sebastian (@sfahnens), Xu Xiao (@sperlingxx), @sriramch, Sean Owen (@srowen), Stephanie Yang (@stpyang), Yuan Tang (@terrytangyuan), Mathew Wicks (@thesuperzapper), Tim Gates (@timgates42), TinkleG (@tinkle1129), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Matvey Turkov (@turk0v), Bobby Wang (@wbo4958), yage (@yage99), @yellowdolphin

**Reviewers**: Nan Zhu (@CodingCat), Crissman Loomis (@Crissman), Cyprien Ricque (@Cyprien-Ricque), Evan Kepner (@EvanKepner), John Zedlewski (@JohnZed), KOLANICH (@KOLANICH), KaiJin Ji (@KerryJi), Kodi Arfer (@Kodiologist), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Nikita Titov (@StrikerRUS), Jacob Kim (@TheJacobKim), Vibhu Jawa (@VibhuJawa), Andrew Kane (@ankane), Arno Candel (@arnocandel), Marcos (@astrowonk), Bryan Woods (@bryan-woods), Andy Adinets (@canonizer), Chen Qin (@chenqin), Thomas Franke (@coding-komek), Peter (@codingforfun), @cpfarrell, Joshua Patterson (@datametrician), @fuhaoda, Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), Honza Sterba (@honzasterba), @iblumin, @jakirkham, Vadim Khotilovich (@khotilov), Keith Kraus (@kkraus14), @koertkuipers, @melonki, Mingjie Tang (@merlintang), OrdoAbChao (@mglowacki100), Daniel Mahler (@mhlr), Matthew Rocklin (@mrocklin), Matthew Jones (@mt-jones), Michaël Benesty (@pommedeterresautee), PSEUDOTENSOR / Jonathan McKinney (@pseudotensor), Rong Ou (@rongou), Vladimir (@sh1ng), Scott Lundberg (@slundberg), Xu Xiao (@sperlingxx), @sriramch, Pasha Stetsenko (@st-pasha), Stephanie Yang (@stpyang), Yuan Tang (@terrytangyuan), Mathew Wicks (@thesuperzapper), Theodore Vasiloudis (@thvasilo), TinkleG (@tinkle1129), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Bobby Wang (@wbo4958), yage (@yage99), @yellowdolphin, Yin Lou (@yinlou)

## v0.90 (2019.05.18)

### XGBoost Python package drops Python 2.x (#4379, #4381)

@@ -6,11 +6,8 @@ file(GLOB_RECURSE R_SOURCES
${CMAKE_CURRENT_LIST_DIR}/src/*.c)
# Use object library to expose symbols
add_library(xgboost-r OBJECT ${R_SOURCES})
if (ENABLE_ALL_WARNINGS)
target_compile_options(xgboost-r PRIVATE -Wall -Wextra)
endif (ENABLE_ALL_WARNINGS)
target_compile_definitions(xgboost-r
PUBLIC
set(R_DEFINITIONS
-DXGBOOST_STRICT_R_MODE=1
-DXGBOOST_CUSTOMIZE_GLOBAL_PRNG=1
-DDMLC_LOG_BEFORE_THROW=0
@@ -18,27 +15,20 @@ target_compile_definitions(xgboost-r
-DDMLC_LOG_CUSTOMIZE=1
-DRABIT_CUSTOMIZE_MSG_
-DRABIT_STRICT_CXX98_)
target_compile_definitions(xgboost-r
PRIVATE ${R_DEFINITIONS})
target_include_directories(xgboost-r
PRIVATE
${LIBR_INCLUDE_DIRS}
${PROJECT_SOURCE_DIR}/include
${PROJECT_SOURCE_DIR}/dmlc-core/include
${PROJECT_SOURCE_DIR}/rabit/include)
target_link_libraries(xgboost-r PUBLIC ${LIBR_CORE_LIBRARY})
if (USE_OPENMP)
find_package(OpenMP REQUIRED)
target_link_libraries(xgboost-r PUBLIC OpenMP::OpenMP_CXX OpenMP::OpenMP_C)
endif (USE_OPENMP)
set_target_properties(
xgboost-r PROPERTIES
CXX_STANDARD 14
CXX_STANDARD 11
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)

# Get compilation and link flags of xgboost-r and propagate to objxgboost
target_link_libraries(objxgboost PUBLIC xgboost-r)
# Add all objects of xgboost-r to objxgboost
target_sources(objxgboost INTERFACE $<TARGET_OBJECTS:xgboost-r>)

set(LIBR_HOME "${LIBR_HOME}" PARENT_SCOPE)
set(LIBR_EXECUTABLE "${LIBR_EXECUTABLE}" PARENT_SCOPE)
set(XGBOOST_DEFINITIONS "${XGBOOST_DEFINITIONS};${R_DEFINITIONS}" PARENT_SCOPE)
set(XGBOOST_OBJ_SOURCES $<TARGET_OBJECTS:xgboost-r> PARENT_SCOPE)
set(LINKED_LIBRARIES_PRIVATE ${LINKED_LIBRARIES_PRIVATE} ${LIBR_CORE_LIBRARY} PARENT_SCOPE)

@@ -1,8 +1,8 @@
Package: xgboost
Type: Package
Title: Extreme Gradient Boosting
Version: 1.2.0.1
Date: 2020-02-21
Version: 1.0.0.1
Date: 2019-07-23
Authors@R: c(
person("Tianqi", "Chen", role = c("aut"),
email = "tianqi.tchen@gmail.com"),
@@ -31,9 +31,9 @@ Authors@R: c(
)
Description: Extreme Gradient Boosting, which is an efficient implementation
of the gradient boosting framework from Chen & Guestrin (2016) <doi:10.1145/2939672.2939785>.
This package is its R interface. The package includes efficient linear
model solver and tree learning algorithms. The package can automatically
do parallel computation on a single machine which could be more than 10
times faster than existing gradient boosting packages. It supports
various objective functions, including regression, classification and ranking.
The package is made to be extensible, so that users are also allowed to define
@@ -54,8 +54,7 @@ Suggests:
lintr,
igraph (>= 1.0.1),
jsonlite,
float,
crayon
float
Depends:
R (>= 3.3.0)
Imports:
@@ -64,5 +63,5 @@ Imports:
data.table (>= 1.9.6),
magrittr (>= 1.5),
stringi (>= 0.5.2)
RoxygenNote: 7.1.1
SystemRequirements: GNU make, C++14
RoxygenNote: 7.0.2
SystemRequirements: GNU make, C++11

@@ -14,7 +14,6 @@ S3method(setinfo,xgb.DMatrix)
S3method(slice,xgb.DMatrix)
export("xgb.attr<-")
export("xgb.attributes<-")
export("xgb.config<-")
export("xgb.parameters<-")
export(cb.cv.predict)
export(cb.early.stop)
@@ -31,7 +30,6 @@ export(xgb.DMatrix)
export(xgb.DMatrix.save)
export(xgb.attr)
export(xgb.attributes)
export(xgb.config)
export(xgb.create.features)
export(xgb.cv)
export(xgb.dump)
@@ -40,7 +38,6 @@ export(xgb.ggplot.deepness)
export(xgb.ggplot.importance)
export(xgb.importance)
export(xgb.load)
export(xgb.load.raw)
export(xgb.model.dt.tree)
export(xgb.plot.deepness)
export(xgb.plot.importance)
@@ -49,9 +46,7 @@ export(xgb.plot.shap)
export(xgb.plot.tree)
export(xgb.save)
export(xgb.save.raw)
export(xgb.serialize)
export(xgb.train)
export(xgb.unserialize)
export(xgboost)
import(methods)
importClassesFrom(Matrix,dgCMatrix)
@@ -62,11 +62,11 @@ cb.print.evaluation <- function(period = 1, showsd = TRUE) {
|
||||
callback <- function(env = parent.frame()) {
|
||||
if (length(env$bst_evaluation) == 0 ||
|
||||
period == 0 ||
|
||||
NVL(env$rank, 0) != 0)
|
||||
NVL(env$rank, 0) != 0 )
|
||||
return()
|
||||
|
||||
i <- env$iteration
|
||||
if ((i - 1) %% period == 0 ||
|
||||
if ((i-1) %% period == 0 ||
|
||||
i == env$begin_iteration ||
|
||||
i == env$end_iteration) {
|
||||
stdev <- if (showsd) env$bst_evaluation_err else NULL
|
||||
@@ -115,7 +115,7 @@ cb.evaluation.log <- function() {
|
||||
stop("bst_evaluation must have non-empty names")
|
||||
|
||||
mnames <<- gsub('-', '_', names(env$bst_evaluation))
|
||||
if (!is.null(env$bst_evaluation_err))
|
||||
if(!is.null(env$bst_evaluation_err))
|
||||
mnames <<- c(paste0(mnames, '_mean'), paste0(mnames, '_std'))
|
||||
}
|
||||
|
||||
@@ -123,12 +123,12 @@ cb.evaluation.log <- function() {
|
||||
env$evaluation_log <- as.data.table(t(simplify2array(env$evaluation_log)))
|
||||
setnames(env$evaluation_log, c('iter', mnames))
|
||||
|
||||
if (!is.null(env$bst_evaluation_err)) {
|
||||
if(!is.null(env$bst_evaluation_err)) {
|
||||
# rearrange col order from _mean,_mean,...,_std,_std,...
|
||||
# to be _mean,_std,_mean,_std,...
|
||||
len <- length(mnames)
|
||||
means <- mnames[seq_len(len / 2)]
|
||||
stds <- mnames[(len / 2 + 1):len]
|
||||
means <- mnames[seq_len(len/2)]
|
||||
stds <- mnames[(len/2 + 1):len]
|
||||
cnames <- numeric(len)
|
||||
cnames[c(TRUE, FALSE)] <- means
|
||||
cnames[c(FALSE, TRUE)] <- stds
|
||||
@@ -144,7 +144,7 @@ cb.evaluation.log <- function() {
|
||||
return(finalizer(env))
|
||||
|
||||
ev <- env$bst_evaluation
|
||||
if (!is.null(env$bst_evaluation_err))
|
||||
if(!is.null(env$bst_evaluation_err))
|
||||
ev <- c(ev, env$bst_evaluation_err)
|
||||
env$evaluation_log <- c(env$evaluation_log,
|
||||
list(c(iter = env$iteration, ev)))
|
||||
@@ -351,13 +351,13 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
||||
|
||||
finalizer <- function(env) {
|
||||
if (!is.null(env$bst)) {
|
||||
attr_best_score <- as.numeric(xgb.attr(env$bst$handle, 'best_score'))
|
||||
attr_best_score = as.numeric(xgb.attr(env$bst$handle, 'best_score'))
|
||||
if (best_score != attr_best_score)
|
||||
stop("Inconsistent 'best_score' values between the closure state: ", best_score,
|
||||
" and the xgb.attr: ", attr_best_score)
|
||||
env$bst$best_iteration <- best_iteration
|
||||
env$bst$best_ntreelimit <- best_ntreelimit
|
||||
env$bst$best_score <- best_score
|
||||
env$bst$best_iteration = best_iteration
|
||||
env$bst$best_ntreelimit = best_ntreelimit
|
||||
env$bst$best_score = best_score
|
||||
} else {
|
||||
env$basket$best_iteration <- best_iteration
|
||||
env$basket$best_ntreelimit <- best_ntreelimit
|
||||
@@ -372,9 +372,9 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
||||
return(finalizer(env))
|
||||
|
||||
i <- env$iteration
|
||||
score <- env$bst_evaluation[metric_idx]
|
||||
score = env$bst_evaluation[metric_idx]
|
||||
|
||||
if ((maximize && score > best_score) ||
|
||||
if (( maximize && score > best_score) ||
|
||||
(!maximize && score < best_score)) {
|
||||
|
||||
best_msg <<- format.eval.string(i, env$bst_evaluation, env$bst_evaluation_err)
|
||||
@@ -500,7 +500,7 @@ cb.cv.predict <- function(save_models = FALSE) {
|
||||
for (fd in env$bst_folds) {
|
||||
pr <- predict(fd$bst, fd$watchlist[[2]], ntreelimit = ntreelimit, reshape = TRUE)
|
||||
if (is.matrix(pred)) {
|
||||
pred[fd$index, ] <- pr
|
||||
pred[fd$index,] <- pr
|
||||
} else {
|
||||
pred[fd$index] <- pr
|
||||
}
|
||||
@@ -613,7 +613,9 @@ cb.gblinear.history <- function(sparse=FALSE) {
|
||||
|
||||
init <- function(env) {
|
||||
if (!is.null(env$bst)) { # xgb.train:
|
||||
coef_path <- list()
|
||||
} else if (!is.null(env$bst_folds)) { # xgb.cv:
|
||||
coef_path <- rep(list(), length(env$bst_folds))
|
||||
} else stop("Parent frame has neither 'bst' nor 'bst_folds'")
|
||||
}
|
||||
|
||||
@@ -703,11 +705,11 @@ xgb.gblinear.history <- function(model, class_index = NULL) {
   if (!is_cv) {
     # extract num_class & num_feat from the internal model
     dmp <- xgb.dump(model)
-    if (length(dmp) < 2 || dmp[2] != "bias:")
+    if(length(dmp) < 2 || dmp[2] != "bias:")
       stop("It does not appear to be a gblinear model")
-    dmp <- dmp[-c(1, 2)]
+    dmp <- dmp[-c(1,2)]
     n <- which(dmp == 'weight:')
-    if (length(n) != 1)
+    if(length(n) != 1)
       stop("It does not appear to be a gblinear model")
     num_class <- n - 1
     num_feat <- (length(dmp) - 4) / num_class
@@ -730,9 +732,9 @@ xgb.gblinear.history <- function(model, class_index = NULL) {
   if (!is.null(class_index) && num_class > 1) {
     coef_path <- if (is.list(coef_path)) {
       lapply(coef_path,
-             function(x) x[, seq(1 + class_index, by = num_class, length.out = num_feat)])
+             function(x) x[, seq(1 + class_index, by=num_class, length.out=num_feat)])
     } else {
-      coef_path <- coef_path[, seq(1 + class_index, by = num_class, length.out = num_feat)]
+      coef_path <- coef_path[, seq(1 + class_index, by=num_class, length.out=num_feat)]
     }
   }
   coef_path
@@ -28,7 +28,7 @@ NVL <- function(x, val) {
 # Merges booster params with whatever is provided in ...
 # plus runs some checks
 check.booster.params <- function(params, ...) {
-  if (!identical(class(params), "list"))
+  if (typeof(params) != "list")
     stop("params must be a list")
 
   # in R interface, allow for '.' instead of '_' in parameter names
@@ -69,23 +69,23 @@ check.booster.params <- function(params, ...) {
 
   if (!is.null(params[['monotone_constraints']]) &&
       typeof(params[['monotone_constraints']]) != "character") {
-    vec2str <- paste(params[['monotone_constraints']], collapse = ',')
-    vec2str <- paste0('(', vec2str, ')')
-    params[['monotone_constraints']] <- vec2str
+    vec2str = paste(params[['monotone_constraints']], collapse = ',')
+    vec2str = paste0('(', vec2str, ')')
+    params[['monotone_constraints']] = vec2str
   }
 
   # interaction constraints parser (convert from list of column indices to string)
   if (!is.null(params[['interaction_constraints']]) &&
       typeof(params[['interaction_constraints']]) != "character"){
     # check input class
-    if (!identical(class(params[['interaction_constraints']]), 'list')) stop('interaction_constraints should be class list')
-    if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric', 'integer'))) {
+    if (class(params[['interaction_constraints']]) != 'list') stop('interaction_constraints should be class list')
+    if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric','integer'))) {
       stop('interaction_constraints should be a list of numeric/integer vectors')
     }
 
     # recast parameter as string
-    interaction_constraints <- sapply(params[['interaction_constraints']], function(x) paste0('[', paste(x, collapse = ','), ']'))
-    params[['interaction_constraints']] <- paste0('[', paste(interaction_constraints, collapse = ','), ']')
+    interaction_constraints <- sapply(params[['interaction_constraints']], function(x) paste0('[', paste(x, collapse=','), ']'))
+    params[['interaction_constraints']] <- paste0('[', paste(interaction_constraints, collapse=','), ']')
   }
   return(params)
 }
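Both branches in this hunk only re-encode vector or list parameters into the string form parsed by the C++ core. A minimal sketch of the resulting encodings (the parameter values are illustrative):

    params <- list(monotone_constraints = c(1, -1, 0),
                   interaction_constraints = list(c(0, 1), c(2)))
    # after check.booster.params(), the string equivalents are:
    paste0('(', paste(c(1, -1, 0), collapse = ','), ')')
    # [1] "(1,-1,0)"
    inner <- sapply(list(c(0, 1), c(2)),
                    function(x) paste0('[', paste(x, collapse = ','), ']'))
    paste0('[', paste(inner, collapse = ','), ']')
    # [1] "[[0,1],[2]]"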
@@ -145,8 +145,7 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
   if (is.null(obj)) {
     .Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
   } else {
-    pred <- predict(booster_handle, dtrain, outputmargin = TRUE, training = TRUE,
-                    ntreelimit = 0)
+    pred <- predict(booster_handle, dtrain, training = TRUE)
     gpair <- obj(pred, dtrain)
     .Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
   }
@@ -168,12 +167,12 @@ xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
   if (is.null(feval)) {
     msg <- .Call(XGBoosterEvalOneIter_R, booster_handle, as.integer(iter), watchlist, as.list(evnames))
     msg <- stri_split_regex(msg, '(\\s+|:|\\s+)')[[1]][-1]
-    res <- as.numeric(msg[c(FALSE, TRUE)]) # even indices are the values
-    names(res) <- msg[c(TRUE, FALSE)] # odds are the names
+    res <- as.numeric(msg[c(FALSE,TRUE)]) # even indices are the values
+    names(res) <- msg[c(TRUE,FALSE)] # odds are the names
   } else {
     res <- sapply(seq_along(watchlist), function(j) {
       w <- watchlist[[j]]
-      preds <- predict(booster_handle, w, outputmargin = TRUE, ntreelimit = 0) # predict using all trees
+      preds <- predict(booster_handle, w) # predict using all trees
       eval_res <- feval(preds, w)
       out <- eval_res$value
       names(out) <- paste0(evnames[j], "-", eval_res$metric)
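The feval branch above expects a function returning list(metric = <name>, value = <number>), which xgb.iter.eval then prefixes with the watchlist entry's name. A minimal sketch of such a custom metric, following the standard xgboost R convention (the error definition itself is illustrative):

    evalerror <- function(preds, dtrain) {
      labels <- getinfo(dtrain, "label")
      # fraction of misclassified examples at the 0.5 threshold
      err <- mean((preds > 0.5) != labels)
      list(metric = "my_error", value = err)
    }
    # passed as feval = evalerror to xgb.train() or xgb.cv()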
@@ -308,66 +307,6 @@ xgb.createFolds <- function(y, k = 10)
 #' @name xgboost-deprecated
 NULL
 
-#' Do not use \code{\link[base]{saveRDS}} or \code{\link[base]{save}} for long-term archival of
-#' models. Instead, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}}.
-#'
-#' It is a common practice to use the built-in \code{\link[base]{saveRDS}} function (or
-#' \code{\link[base]{save}}) to persist R objects to the disk. While it is possible to persist
-#' \code{xgb.Booster} objects using \code{\link[base]{saveRDS}}, it is not advisable to do so if
-#' the model is to be accessed in the future. If you train a model with the current version of
-#' XGBoost and persist it with \code{\link[base]{saveRDS}}, the model is not guaranteed to be
-#' accessible in later releases of XGBoost. To ensure that your model can be accessed in future
-#' releases of XGBoost, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}} instead.
-#'
-#' @details
-#' Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into
-#' the JSON format by specifying the JSON extension. To read the model back, use
-#' \code{\link{xgb.load}}.
-#'
-#' Use \code{\link{xgb.save.raw}} to save the XGBoost model as a sequence (vector) of raw bytes
-#' in a future-proof manner. Future releases of XGBoost will be able to read the raw bytes and
-#' re-construct the corresponding model. To read the model back, use \code{\link{xgb.load.raw}}.
-#' The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model
-#' as part of another R object.
-#'
-#' Note: Do not use \code{\link{xgb.serialize}} to store models long-term. It persists not only the
-#' model but also internal configurations and parameters, and its format is not stable across
-#' multiple XGBoost versions. Use \code{\link{xgb.serialize}} only for checkpointing.
-#'
-#' For more details and explanation about model persistence and archival, consult the page
-#' \url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}.
-#'
-#' @examples
-#' data(agaricus.train, package='xgboost')
-#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
-#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-#'
-#' # Save as a stand-alone file; load it with xgb.load()
-#' xgb.save(bst, 'xgb.model')
-#' bst2 <- xgb.load('xgb.model')
-#'
-#' # Save as a stand-alone file (JSON); load it with xgb.load()
-#' xgb.save(bst, 'xgb.model.json')
-#' bst2 <- xgb.load('xgb.model.json')
-#'
-#' # Save as a raw byte vector; load it with xgb.load.raw()
-#' xgb_bytes <- xgb.save.raw(bst)
-#' bst2 <- xgb.load.raw(xgb_bytes)
-#'
-#' # Persist XGBoost model as part of another R object
-#' obj <- list(xgb_model_bytes = xgb.save.raw(bst), description = "My first XGBoost model")
-#' # Persist the R object. Here, saveRDS() is okay, since it doesn't persist
-#' # xgb.Booster directly. What's being persisted is the future-proof byte representation
-#' # as given by xgb.save.raw().
-#' saveRDS(obj, 'my_object.rds')
-#' # Read back the R object
-#' obj2 <- readRDS('my_object.rds')
-#' # Re-construct xgb.Booster object from the bytes
-#' bst2 <- xgb.load.raw(obj2$xgb_model_bytes)
-#'
-#' @name a-compatibility-note-for-saveRDS-save
-NULL
-
 # Lookup table for the deprecated parameters bookkeeping
 depr_par_lut <- matrix(c(
   'print.every.n', 'print_every_n',
@@ -376,8 +315,8 @@ depr_par_lut <- matrix(c(
   'with.stats', 'with_stats',
   'numberOfClusters', 'n_clusters',
   'features.keep', 'features_keep',
-  'plot.height', 'plot_height',
-  'plot.width', 'plot_width',
+  'plot.height','plot_height',
+  'plot.width','plot_width',
   'n_first_tree', 'trees',
   'dummy', 'DUMMY'
 ), ncol = 2, byrow = TRUE)
@@ -390,20 +329,20 @@ colnames(depr_par_lut) <- c('old', 'new')
 check.deprecation <- function(..., env = parent.frame()) {
   pars <- list(...)
   # exact and partial matches
-  all_match <- pmatch(names(pars), depr_par_lut[, 1])
+  all_match <- pmatch(names(pars), depr_par_lut[,1])
   # indices of matched pars' names
   idx_pars <- which(!is.na(all_match))
   if (length(idx_pars) == 0) return()
   # indices of matched LUT rows
   idx_lut <- all_match[idx_pars]
   # which of idx_lut were the exact matches?
-  ex_match <- depr_par_lut[idx_lut, 1] %in% names(pars)
+  ex_match <- depr_par_lut[idx_lut,1] %in% names(pars)
   for (i in seq_along(idx_pars)) {
     pars_par <- names(pars)[idx_pars[i]]
     old_par <- depr_par_lut[idx_lut[i], 1]
     new_par <- depr_par_lut[idx_lut[i], 2]
     if (!ex_match[i]) {
-      warning("'", pars_par, "' was partially matched to '", old_par, "'")
+      warning("'", pars_par, "' was partially matched to '", old_par,"'")
     }
     .Deprecated(new_par, old = old_par, package = 'xgboost')
     if (new_par != 'NULL') {
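check.deprecation() relies on base R's pmatch() to accept both exact and unambiguous partial matches against the old-name column of the lookup table. A small illustration of that matching behavior, in plain base R and independent of xgboost:

    old_names <- c('print.every.n', 'plot.height', 'plot.width')
    pmatch(c('print.every.n', 'plot.h', 'plot'), old_names)
    # [1]  1  2 NA
    # exact match, unique partial match, and NA for the ambiguous prefix 'plot'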
@@ -1,39 +1,24 @@
 # Construct an internal xgboost Booster and return a handle to it.
 # internal utility function
-xgb.Booster.handle <- function(params = list(), cachelist = list(),
-                               modelfile = NULL) {
+xgb.Booster.handle <- function(params = list(), cachelist = list(), modelfile = NULL) {
   if (typeof(cachelist) != "list" ||
       !all(vapply(cachelist, inherits, logical(1), what = 'xgb.DMatrix'))) {
     stop("cachelist must be a list of xgb.DMatrix objects")
   }
-  ## Load existing model, dispatch for on disk model file and in memory buffer
 
+  handle <- .Call(XGBoosterCreate_R, cachelist)
   if (!is.null(modelfile)) {
     if (typeof(modelfile) == "character") {
-      ## A filename
-      handle <- .Call(XGBoosterCreate_R, cachelist)
       .Call(XGBoosterLoadModel_R, handle, modelfile[1])
-      class(handle) <- "xgb.Booster.handle"
-      if (length(params) > 0) {
-        xgb.parameters(handle) <- params
-      }
-      return(handle)
     } else if (typeof(modelfile) == "raw") {
-      ## A memory buffer
-      bst <- xgb.unserialize(modelfile)
-      xgb.parameters(bst) <- params
-      return (bst)
+      .Call(XGBoosterLoadModelFromRaw_R, handle, modelfile)
     } else if (inherits(modelfile, "xgb.Booster")) {
-      ## A booster object
       bst <- xgb.Booster.complete(modelfile, saveraw = TRUE)
-      bst <- xgb.unserialize(bst$raw)
-      xgb.parameters(bst) <- params
-      return (bst)
+      .Call(XGBoosterLoadModelFromRaw_R, handle, bst$raw)
     } else {
       stop("modelfile must be either character filename, or raw booster dump, or xgb.Booster object")
     }
   }
-  ## Create new model
-  handle <- .Call(XGBoosterCreate_R, cachelist)
   class(handle) <- "xgb.Booster.handle"
   if (length(params) > 0) {
     xgb.parameters(handle) <- params
@@ -63,8 +48,8 @@ is.null.handle <- function(handle) {
   return(FALSE)
 }
 
-# Return a verified to be valid handle out of either xgb.Booster.handle or
-# xgb.Booster internal utility function
+# Return a verified to be valid handle out of either xgb.Booster.handle or xgb.Booster
+# internal utility function
 xgb.get.handle <- function(object) {
   if (inherits(object, "xgb.Booster")) {
     handle <- object$handle
@@ -111,8 +96,6 @@ xgb.get.handle <- function(object) {
 #'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
 #' saveRDS(bst, "xgb.model.rds")
 #'
-#' # Warning: The resulting RDS file is only compatible with the current XGBoost version.
-#' # Refer to the section titled "a-compatibility-note-for-saveRDS-save".
 #' bst1 <- readRDS("xgb.model.rds")
 #' if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
 #' # the handle is invalid:
@@ -130,29 +113,9 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
   if (is.null.handle(object$handle)) {
     object$handle <- xgb.Booster.handle(modelfile = object$raw)
   } else {
-    if (is.null(object$raw) && saveraw) {
-      object$raw <- xgb.serialize(object$handle)
-    }
+    if (is.null(object$raw) && saveraw)
+      object$raw <- xgb.save.raw(object$handle)
   }
 
-  attrs <- xgb.attributes(object)
-  if (!is.null(attrs$best_ntreelimit)) {
-    object$best_ntreelimit <- as.integer(attrs$best_ntreelimit)
-  }
-  if (!is.null(attrs$best_iteration)) {
-    ## Convert from 0 based back to 1 based.
-    object$best_iteration <- as.integer(attrs$best_iteration) + 1
-  }
-  if (!is.null(attrs$best_score)) {
-    object$best_score <- as.numeric(attrs$best_score)
-  }
-  if (!is.null(attrs$best_msg)) {
-    object$best_msg <- attrs$best_msg
-  }
-  if (!is.null(attrs$niter)) {
-    object$niter <- as.integer(attrs$niter)
-  }
-
   return(object)
 }
@@ -372,8 +335,8 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
       matrix(ret, nrow = n_row, byrow = TRUE, dimnames = list(NULL, cnames))
     } else {
       arr <- array(ret, c(n_col1, n_group, n_row),
-                   dimnames = list(cnames, NULL, NULL)) %>% aperm(c(2, 3, 1)) # [group, row, col]
-      lapply(seq_len(n_group), function(g) arr[g, , ])
+                   dimnames = list(cnames, NULL, NULL)) %>% aperm(c(2,3,1)) # [group, row, col]
+      lapply(seq_len(n_group), function(g) arr[g,,])
     }
   } else if (predinteraction) {
     n_col1 <- ncol(newdata) + 1
@@ -382,11 +345,11 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
     ret <- if (n_ret == n_row) {
       matrix(ret, ncol = 1, dimnames = list(NULL, cnames))
     } else if (n_group == 1) {
-      array(ret, c(n_col1, n_col1, n_row), dimnames = list(cnames, cnames, NULL)) %>% aperm(c(3, 1, 2))
+      array(ret, c(n_col1, n_col1, n_row), dimnames = list(cnames, cnames, NULL)) %>% aperm(c(3,1,2))
     } else {
       arr <- array(ret, c(n_col1, n_col1, n_group, n_row),
-                   dimnames = list(cnames, cnames, NULL, NULL)) %>% aperm(c(3, 4, 1, 2)) # [group, row, col1, col2]
-      lapply(seq_len(n_group), function(g) arr[g, , , ])
+                   dimnames = list(cnames, cnames, NULL, NULL)) %>% aperm(c(3,4,1,2)) # [group, row, col1, col2]
+      lapply(seq_len(n_group), function(g) arr[g,,,])
     }
   } else if (reshape && npred_per_case > 1) {
     ret <- matrix(ret, nrow = n_row, byrow = TRUE)
@@ -436,7 +399,7 @@ predict.xgb.Booster.handle <- function(object, ...) {
 #' That would only matter if attributes need to be set many times.
 #' Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
 #' the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
-#' and it would be user's responsibility to call \code{xgb.serialize} to update it.
+#' and it would be user's responsibility to call \code{xgb.save.raw} to update it.
 #'
 #' The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
 #' but it doesn't delete the other existing attributes.
@@ -495,7 +458,7 @@ xgb.attr <- function(object, name) {
   }
   .Call(XGBoosterSetAttr_R, handle, as.character(name[1]), value)
   if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
-    object$raw <- xgb.serialize(object$handle)
+    object$raw <- xgb.save.raw(object$handle)
   }
   object
 }
@@ -535,41 +498,11 @@ xgb.attributes <- function(object) {
     .Call(XGBoosterSetAttr_R, handle, names(a[i]), a[[i]])
   }
   if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
-    object$raw <- xgb.serialize(object$handle)
+    object$raw <- xgb.save.raw(object$handle)
   }
   object
 }
 
-#' Accessors for model parameters as JSON string.
-#'
-#' @param object Object of class \code{xgb.Booster}
-#' @param value A JSON string.
-#'
-#' @examples
-#' data(agaricus.train, package='xgboost')
-#' train <- agaricus.train
-#'
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
-#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-#' config <- xgb.config(bst)
-#'
-#' @rdname xgb.config
-#' @export
-xgb.config <- function(object) {
-  handle <- xgb.get.handle(object)
-  .Call(XGBoosterSaveJsonConfig_R, handle);
-}
-
-#' @rdname xgb.config
-#' @export
-`xgb.config<-` <- function(object, value) {
-  handle <- xgb.get.handle(object)
-  .Call(XGBoosterLoadJsonConfig_R, handle, value)
-  object$raw <- NULL # force renew the raw buffer
-  object <- xgb.Booster.complete(object)
-  object
-}
-
 #' Accessors for model parameters.
 #'
 #' Only the setter for xgboost parameters is currently implemented.
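The accessor pair removed in the hunk above round-trips the booster's internal learner configuration through a JSON string. A minimal sketch of using it, in xgboost versions where xgb.config is available (the model setup is illustrative only):

    data(agaricus.train, package = 'xgboost')
    bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
                   nrounds = 2, objective = "binary:logistic")
    cfg <- xgb.config(bst)    # JSON string with the full learner configuration
    xgb.config(bst) <- cfg    # loading it back also refreshes the raw model cache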
@@ -606,7 +539,7 @@ xgb.config <- function(object) {
     .Call(XGBoosterSetParam_R, handle, names(p[i]), p[[i]])
   }
   if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
-    object$raw <- xgb.serialize(object$handle)
+    object$raw <- xgb.save.raw(object$handle)
   }
   object
 }
@@ -659,7 +592,7 @@ print.xgb.Booster <- function(x, verbose = FALSE, ...) {
 
   if (!is.null(x$params)) {
     cat('params (as set within xgb.train):\n')
-    cat(' ',
+    cat( ' ',
         paste(names(x$params),
               paste0('"', unlist(x$params), '"'),
               sep = ' = ', collapse = ', '), '\n', sep = '')
@@ -672,9 +605,9 @@ print.xgb.Booster <- function(x, verbose = FALSE, ...) {
   if (length(attrs) > 0) {
     cat('xgb.attributes:\n')
     if (verbose) {
-      cat(paste(paste0('  ', names(attrs)),
-                paste0('"', unlist(attrs), '"'),
-                sep = ' = ', collapse = '\n'), '\n', sep = '')
+      cat( paste(paste0('  ',names(attrs)),
+                 paste0('"', unlist(attrs), '"'),
+                 sep = ' = ', collapse = '\n'), '\n', sep = '')
     } else {
       cat('  ', paste(names(attrs), collapse = ', '), '\n', sep = '')
     }
@@ -696,7 +629,7 @@ print.xgb.Booster <- function(x, verbose = FALSE, ...) {
   #cat('ntree: ', xgb.ntree(x), '\n', sep='')
 
   for (n in setdiff(names(x), c('handle', 'raw', 'call', 'params', 'callbacks',
-                                'evaluation_log', 'niter', 'feature_names'))) {
+                                'evaluation_log','niter','feature_names'))) {
     if (is.atomic(x[[n]])) {
       cat(n, ':', x[[n]], '\n', sep = ' ')
     } else {
@@ -188,10 +188,9 @@ getinfo <- function(object, ...) UseMethod("getinfo")
 getinfo.xgb.DMatrix <- function(object, name, ...) {
   if (typeof(name) != "character" ||
       length(name) != 1 ||
-      !name %in% c('label', 'weight', 'base_margin', 'nrow',
-                   'label_lower_bound', 'label_upper_bound')) {
+      !name %in% c('label', 'weight', 'base_margin', 'nrow')) {
     stop("getinfo: name must be one of the following\n",
-         "    'label', 'weight', 'base_margin', 'nrow', 'label_lower_bound', 'label_upper_bound'")
+         "    'label', 'weight', 'base_margin', 'nrow'")
   }
   if (name != "nrow"){
     ret <- .Call(XGDMatrixGetInfo_R, object, name)
@@ -244,19 +243,9 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
     .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
     return(TRUE)
   }
-  if (name == "label_lower_bound") {
-    if (length(info) != nrow(object))
-      stop("The length of lower-bound labels must equal to the number of rows in the input data")
-    .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
-    return(TRUE)
-  }
-  if (name == "label_upper_bound") {
-    if (length(info) != nrow(object))
-      stop("The length of upper-bound labels must equal to the number of rows in the input data")
-    .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
-    return(TRUE)
-  }
   if (name == "weight") {
     if (length(info) != nrow(object))
       stop("The length of weights must equal to the number of rows in the input data")
     .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
     return(TRUE)
   }
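The branches deleted above back the survival-analysis label bounds. A minimal sketch of setting them on interval-censored data, in versions where these fields exist (the data values are illustrative only):

    # lower == upper for exact event times; upper == Inf for right-censored rows
    X <- matrix(rnorm(8), nrow = 4)
    dtrain <- xgb.DMatrix(X)
    setinfo(dtrain, 'label_lower_bound', c(1.0, 2.5, 3.0, 4.2))
    setinfo(dtrain, 'label_upper_bound', c(1.0, Inf, 3.0, Inf))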
@@ -320,7 +309,7 @@ slice.xgb.DMatrix <- function(object, idxset, ...) {
     for (i in seq_along(ind)) {
       obj_attr <- attr(object, nms[i])
       if (NCOL(obj_attr) > 1) {
-        attr(ret, nms[i]) <- obj_attr[idxset, ]
+        attr(ret, nms[i]) <- obj_attr[idxset,]
       } else {
         attr(ret, nms[i]) <- obj_attr[idxset]
       }
@@ -358,9 +347,9 @@ slice.xgb.DMatrix <- function(object, idxset, ...) {
 print.xgb.DMatrix <- function(x, verbose = FALSE, ...) {
   cat('xgb.DMatrix  dim:', nrow(x), 'x', ncol(x), '  info: ')
   infos <- c()
-  if (length(getinfo(x, 'label')) > 0) infos <- 'label'
-  if (length(getinfo(x, 'weight')) > 0) infos <- c(infos, 'weight')
-  if (length(getinfo(x, 'base_margin')) > 0) infos <- c(infos, 'base_margin')
+  if(length(getinfo(x, 'label')) > 0) infos <- 'label'
+  if(length(getinfo(x, 'weight')) > 0) infos <- c(infos, 'weight')
+  if(length(getinfo(x, 'base_margin')) > 0) infos <- c(infos, 'base_margin')
   if (length(infos) == 0) infos <- 'NA'
   cat(infos)
   cnames <- colnames(x)
@@ -1,10 +1,10 @@
 #' Save xgb.DMatrix object to binary file
-#'
+#' 
 #' Save xgb.DMatrix object to binary file
-#'
+#' 
 #' @param dmatrix the \code{xgb.DMatrix} object
 #' @param fname the name of the file to write.
-#'
+#' 
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' train <- agaricus.train
@@ -18,7 +18,7 @@ xgb.DMatrix.save <- function(dmatrix, fname) {
     stop("fname must be character")
   if (!inherits(dmatrix, "xgb.DMatrix"))
     stop("dmatrix must be xgb.DMatrix")
-
+ 
   .Call(XGDMatrixSaveBinary_R, dmatrix, fname[1], 0L)
   return(TRUE)
 }
@@ -1,50 +1,50 @@
 #' Create new features from a previously learned model
-#'
+#' 
 #' May improve the learning by adding new features to the training data based on the decision trees from a previously learned model.
-#'
+#' 
 #' @param model decision tree boosting model learned on the original data
 #' @param data original data (usually provided as a \code{dgCMatrix} matrix)
 #' @param ... currently not used
-#'
+#' 
 #' @return \code{dgCMatrix} matrix including both the original data and the new features.
 #'
-#' @details
+#' @details 
 #' This is the function inspired from the paragraph 3.1 of the paper:
-#'
+#' 
 #' \strong{Practical Lessons from Predicting Clicks on Ads at Facebook}
-#'
-#' \emph{(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yan, xin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers,
+#' 
+#' \emph{(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yan, xin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers, 
 #' Joaquin Quinonero Candela)}
-#'
+#' 
 #' International Workshop on Data Mining for Online Advertising (ADKDD) - August 24, 2014
-#'
+#' 
 #' \url{https://research.fb.com/publications/practical-lessons-from-predicting-clicks-on-ads-at-facebook/}.
-#'
+#' 
 #' Extract explaining the method:
-#'
+#' 
 #' "We found that boosted decision trees are a powerful and very
 #' convenient way to implement non-linear and tuple transformations
 #' of the kind we just described. We treat each individual
 #' tree as a categorical feature that takes as value the
-#' index of the leaf an instance ends up falling in. We use
-#' 1-of-K coding of this type of features.
-#'
-#' For example, consider the boosted tree model in Figure 1 with 2 subtrees,
+#' index of the leaf an instance ends up falling in. We use 
+#' 1-of-K coding of this type of features. 
+#' 
+#' For example, consider the boosted tree model in Figure 1 with 2 subtrees, 
 #' where the first subtree has 3 leafs and the second 2 leafs. If an
 #' instance ends up in leaf 2 in the first subtree and leaf 1 in
 #' second subtree, the overall input to the linear classifier will
 #' be the binary vector \code{[0, 1, 0, 1, 0]}, where the first 3 entries
 #' correspond to the leaves of the first subtree and last 2 to
 #' those of the second subtree.
-#'
+#' 
 #' [...]
-#'
+#' 
 #' We can understand boosted decision tree
 #' based transformation as a supervised feature encoding that
 #' converts a real-valued vector into a compact binary-valued
 #' vector. A traversal from root node to a leaf node represents
 #' a rule on certain features."
-#'
+#' 
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' data(agaricus.test, package='xgboost')
@@ -55,33 +55,33 @@
 #' nrounds = 4
 #'
 #' bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
-#'
+#' 
 #' # Model accuracy without new features
 #' accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) /
 #'                    length(agaricus.test$label)
-#'
+#' 
 #' # Convert previous features to one hot encoding
 #' new.features.train <- xgb.create.features(model = bst, agaricus.train$data)
 #' new.features.test <- xgb.create.features(model = bst, agaricus.test$data)
-#'
+#' 
 #' # learning with new features
 #' new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
 #' new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label)
 #' watchlist <- list(train = new.dtrain)
 #' bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)
-#'
+#' 
 #' # Model accuracy with new features
 #' accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) /
 #'                   length(agaricus.test$label)
-#'
+#' 
 #' # Here the accuracy was already good and is now perfect.
 #' cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
 #'           accuracy.after, "!\n"))
-#'
+#' 
 #' @export
 xgb.create.features <- function(model, data, ...){
   check.deprecation(...)
   pred_with_leaf <- predict(model, data, predleaf = TRUE)
   cols <- lapply(as.data.frame(pred_with_leaf), factor)
-  cbind(data, sparse.model.matrix(~ . -1, cols)) # nolint
+  cbind(data, sparse.model.matrix( ~ . -1, cols))
 }
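The quoted method maps each tree's predicted leaf index to a one-hot block, which is exactly what the factor/sparse.model.matrix pipeline in xgb.create.features implements. A minimal sketch of the encoding step on its own (the leaf-index matrix stands in for a real predleaf prediction and is illustrative only):

    library(Matrix)
    # pretend predict(model, data, predleaf = TRUE) returned these leaf
    # indices for 2 trees over 3 rows:
    pred_with_leaf <- matrix(c(2, 1,
                               1, 2,
                               3, 1), ncol = 2, byrow = TRUE)
    cols <- lapply(as.data.frame(pred_with_leaf), factor)
    sparse.model.matrix(~ . - 1, cols)
    # each tree contributes one indicator column per distinct leaf,
    # reproducing the paper's 1-of-K coding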
@@ -2,15 +2,12 @@
 #'
 #' The cross validation function of xgboost
 #'
-#' @param params the list of parameters. The complete list of parameters is
-#'   available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
-#'   is a shorter summary:
+#' @param params the list of parameters. Commonly used ones are:
 #' \itemize{
 #'   \item \code{objective} objective function, common ones are
 #'     \itemize{
-#'       \item \code{reg:squarederror} Regression with squared loss.
-#'       \item \code{binary:logistic} logistic regression for classification.
-#'       \item See \code{\link[=xgb.train]{xgb.train}()} for complete list of objectives.
+#'       \item \code{reg:squarederror} Regression with squared loss
+#'       \item \code{binary:logistic} logistic regression for classification
 #'     }
 #'   \item \code{eta} step size of each boosting step
 #'   \item \code{max_depth} maximum depth of the tree
@@ -104,7 +101,7 @@
 #'         (only available with early stopping).
 #'   \item \code{pred} CV prediction values available when \code{prediction} is set.
 #'         It is either vector or matrix (see \code{\link{cb.cv.predict}}).
-#'   \item \code{models} a list of the CV folds' models. It is only available with the explicit
+#'   \item \code{models} a liost of the CV folds' models. It is only available with the explicit
 #'         setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
 #' }
 #'
@@ -137,20 +134,20 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
   #    stop("Either 'eval_metric' or 'feval' must be provided for CV")
 
   # Check the labels
-  if ((inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
-      (!inherits(data, 'xgb.DMatrix') && is.null(label))) {
+  if ( (inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
+       (!inherits(data, 'xgb.DMatrix') && is.null(label))) {
     stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix")
   } else if (inherits(data, 'xgb.DMatrix')) {
     if (!is.null(label))
       warning("xgb.cv: label will be ignored, since data is of type xgb.DMatrix")
-    cv_label <- getinfo(data, 'label')
+    cv_label = getinfo(data, 'label')
   } else {
-    cv_label <- label
+    cv_label = label
   }
 
   # CV folds
-  if (!is.null(folds)) {
-    if (!is.list(folds) || length(folds) < 2)
+  if(!is.null(folds)) {
+    if(!is.list(folds) || length(folds) < 2)
       stop("'folds' must be a list with 2 or more elements that are vectors of indices for each CV-fold")
     nfold <- length(folds)
   } else {
@@ -165,7 +162,7 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
 
   # verbosity & evaluation printing callback:
   params <- c(params, list(silent = 1))
-  print_every_n <- max(as.integer(print_every_n), 1L)
+  print_every_n <- max( as.integer(print_every_n), 1L)
   if (!has.callbacks(callbacks, 'cb.print.evaluation') && verbose) {
     callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n, showsd = showsd))
   }
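As the check above enforces, user-supplied folds are simply a list of row-index vectors, one vector per CV fold, and xgb.cv() then derives nfold from their count. A minimal sketch (indices and parameters are illustrative):

    # 3 manual folds over 9 rows
    folds <- list(c(1, 4, 7), c(2, 5, 8), c(3, 6, 9))
    # cv <- xgb.cv(params = list(objective = "binary:logistic"),
    #              data = dtrain, nrounds = 10, folds = folds)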
@@ -196,20 +193,20 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
   bst_folds <- lapply(seq_along(folds), function(k) {
     dtest <- slice(dall, folds[[k]])
     # code originally contributed by @RolandASc on stackoverflow
-    if (is.null(train_folds))
+    if(is.null(train_folds))
       dtrain <- slice(dall, unlist(folds[-k]))
     else
       dtrain <- slice(dall, train_folds[[k]])
     handle <- xgb.Booster.handle(params, list(dtrain, dtest))
-    list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test = dtest), index = folds[[k]])
+    list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test=dtest), index = folds[[k]])
   })
   rm(dall)
   # a "basket" to collect some results from callbacks
   basket <- list()
 
   # extract parameters that can affect the relationship b/w #trees and #iterations
-  num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1) # nolint
-  num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1) # nolint
+  num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1)
+  num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1)
 
   # those are fixed for CV (no training continuation)
   begin_iteration <- 1
@@ -226,7 +223,7 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
     })
     msg <- simplify2array(msg)
     bst_evaluation <- rowMeans(msg)
-    bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2) # nolint
+    bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2)
 
     for (f in cb$post_iter) f()
 
@@ -285,10 +282,10 @@ print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
   }
   if (!is.null(x$params)) {
     cat('params (as set within xgb.cv):\n')
-    cat(' ',
-        paste(names(x$params),
-              paste0('"', unlist(x$params), '"'),
-              sep = ' = ', collapse = ', '), '\n', sep = '')
+    cat( ' ',
+         paste(names(x$params),
+               paste0('"', unlist(x$params), '"'),
+               sep = ' = ', collapse = ', '), '\n', sep = '')
   }
   if (!is.null(x$callbacks) && length(x$callbacks) > 0) {
     cat('callbacks:\n')
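The per-iteration CV error band above uses the population-variance identity Var(x) = E[x^2] - E[x]^2 applied row-wise over the fold metrics. A quick check of the rowMeans form on one metric (the numbers are illustrative):

    msg <- rbind('test-error' = c(0.10, 0.20, 0.30))  # one metric, 3 folds
    m <- rowMeans(msg)                    # 0.2
    sqrt(rowMeans(msg^2) - m^2)           # 0.08164966, the population sd of c(.1, .2, .3)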
@@ -1,15 +1,15 @@
 #' Dump an xgboost model in text format.
-#'
+#' 
 #' Dump an xgboost model in text format.
-#'
+#' 
 #' @param model the model object.
-#' @param fname the name of the text file where to save the model text dump.
+#' @param fname the name of the text file where to save the model text dump. 
 #'   If not provided or set to \code{NULL}, the model is returned as a \code{character} vector.
 #' @param fmap feature map file representing feature types.
-#'   Detailed description could be found at
+#'   Detailed description could be found at 
 #'   \url{https://github.com/dmlc/xgboost/wiki/Binary-Classification#dump-model}.
 #'   See demo/ for walkthrough example in R, and
-#'   \url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt}
+#'   \url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt} 
 #'   for example Format.
 #' @param with_stats whether to dump some additional statistics about the splits.
 #'   When this option is on, the model dump contains two additional values:
@@ -27,18 +27,18 @@
 #' data(agaricus.test, package='xgboost')
 #' train <- agaricus.train
 #' test <- agaricus.test
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, 
 #'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
 #' # save the model in file 'xgb.model.dump'
 #' dump_path = file.path(tempdir(), 'model.dump')
 #' xgb.dump(bst, dump_path, with_stats = TRUE)
-#'
+#' 
 #' # print the model without saving it to a file
 #' print(xgb.dump(bst, with_stats = TRUE))
-#'
+#' 
 #' # print in JSON format:
 #' cat(xgb.dump(bst, with_stats = TRUE, dump_format='json'))
-#'
+#' 
 #' @export
 xgb.dump <- function(model, fname = NULL, fmap = "", with_stats=FALSE,
                      dump_format = c("text", "json"), ...) {
@@ -50,19 +50,19 @@ xgb.dump <- function(model, fname = NULL, fmap = "", with_stats=FALSE,
     stop("fname: argument must be a character string (when provided)")
   if (!(is.null(fmap) || is.character(fmap)))
     stop("fmap: argument must be a character string (when provided)")
 
   model <- xgb.Booster.complete(model)
   model_dump <- .Call(XGBoosterDumpModel_R, model$handle, NVL(fmap, "")[1], as.integer(with_stats),
                       as.character(dump_format))
 
-  if (is.null(fname))
+  if (is.null(fname)) 
     model_dump <- stri_replace_all_regex(model_dump, '\t', '')
 
   if (dump_format == "text")
     model_dump <- unlist(stri_split_regex(model_dump, '\n'))
 
   model_dump <- grep('^\\s*$', model_dump, invert = TRUE, value = TRUE)
 
   if (is.null(fname)) {
     return(model_dump)
   } else {
@@ -3,9 +3,9 @@
 #' @rdname xgb.plot.importance
 #' @export
-xgb.ggplot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
+xgb.ggplot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL, 
                                   rel_to_first = FALSE, n_clusters = c(1:10), ...) {
 
   importance_matrix <- xgb.plot.importance(importance_matrix, top_n = top_n, measure = measure,
                                            rel_to_first = rel_to_first, plot = FALSE, ...)
   if (!requireNamespace("ggplot2", quietly = TRUE)) {
@@ -14,21 +14,21 @@ xgb.ggplot.importance <- function(importance_matrix = NULL, top_n = NULL, measur
   if (!requireNamespace("Ckmeans.1d.dp", quietly = TRUE)) {
     stop("Ckmeans.1d.dp package is required", call. = FALSE)
   }
 
   clusters <- suppressWarnings(
     Ckmeans.1d.dp::Ckmeans.1d.dp(importance_matrix$Importance, n_clusters)
   )
   importance_matrix[, Cluster := as.character(clusters$cluster)]
 
   plot <-
-    ggplot2::ggplot(importance_matrix,
+    ggplot2::ggplot(importance_matrix, 
                     ggplot2::aes(x = factor(Feature, levels = rev(Feature)), y = Importance, width = 0.5),
-                    environment = environment()) +
-    ggplot2::geom_bar(ggplot2::aes(fill = Cluster), stat = "identity", position = "identity") +
-    ggplot2::coord_flip() +
-    ggplot2::xlab("Features") +
-    ggplot2::ggtitle("Feature importance") +
-    ggplot2::theme(plot.title = ggplot2::element_text(lineheight = .9, face = "bold"),
+                    environment = environment()) +
+    ggplot2::geom_bar(ggplot2::aes(fill = Cluster), stat = "identity", position = "identity") +
+    ggplot2::coord_flip() +
+    ggplot2::xlab("Features") +
+    ggplot2::ggtitle("Feature importance") +
+    ggplot2::theme(plot.title = ggplot2::element_text(lineheight = .9, face = "bold"),
                    panel.grid.major.y = ggplot2::element_blank())
   return(plot)
 }
@@ -42,7 +42,7 @@ xgb.ggplot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med
     stop("ggplot2 package is required for plotting the graph deepness.", call. = FALSE)
 
   which <- match.arg(which)
 
   dt_depths <- xgb.plot.deepness(model = model, plot = FALSE)
   dt_summaries <- dt_depths[, .(.N, Cover = mean(Cover)), Depth]
   setkey(dt_summaries, 'Depth')
@@ -60,30 +60,30 @@ xgb.ggplot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med
       axis.ticks = ggplot2::element_blank(),
       axis.text.x = ggplot2::element_blank()
     )
 
-    p2 <-
+    p2 <- 
       ggplot2::ggplot(dt_summaries) +
-      ggplot2::geom_bar(ggplot2::aes(x = Depth, y = Cover), stat = "Identity") +
+      ggplot2::geom_bar(ggplot2::aes(x = Depth, y = Cover), stat = "Identity") + 
       ggplot2::xlab("Leaf depth") +
       ggplot2::ylab("Weighted cover")
 
     multiplot(p1, p2, cols = 1)
    return(invisible(list(p1, p2)))
 
   } else if (which == "max.depth") {
     p <-
       ggplot2::ggplot(dt_depths[, max(Depth), Tree]) +
       ggplot2::geom_jitter(ggplot2::aes(x = Tree, y = V1),
-                           height = 0.15, alpha = 0.4, size = 3, stroke = 0) +
+                           height = 0.15, alpha=0.4, size=3, stroke=0) +
       ggplot2::xlab("tree #") +
       ggplot2::ylab("Max tree leaf depth")
     return(p)
 
   } else if (which == "med.depth") {
     p <-
       ggplot2::ggplot(dt_depths[, median(as.numeric(Depth)), Tree]) +
       ggplot2::geom_jitter(ggplot2::aes(x = Tree, y = V1),
-                           height = 0.15, alpha = 0.4, size = 3, stroke = 0) +
+                           height = 0.15, alpha=0.4, size=3, stroke=0) +
       ggplot2::xlab("tree #") +
       ggplot2::ylab("Median tree leaf depth")
     return(p)
@@ -92,7 +92,7 @@ xgb.ggplot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med
     p <-
       ggplot2::ggplot(dt_depths[, median(abs(Weight)), Tree]) +
       ggplot2::geom_point(ggplot2::aes(x = Tree, y = V1),
-                          alpha = 0.4, size = 3, stroke = 0) +
+                          alpha=0.4, size=3, stroke=0) +
       ggplot2::xlab("tree #") +
       ggplot2::ylab("Median absolute leaf weight")
     return(p)
@@ -105,11 +105,11 @@ xgb.ggplot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med
 # internal utility function
 multiplot <- function(..., cols = 1) {
   plots <- list(...)
-  num_plots <- length(plots)
-
+  num_plots = length(plots)
+ 
   layout <- matrix(seq(1, cols * ceiling(num_plots / cols)),
                    ncol = cols, nrow = ceiling(num_plots / cols))
 
   if (num_plots == 1) {
     print(plots[[1]])
   } else {
@@ -118,7 +118,7 @@ multiplot <- function(..., cols = 1) {
     for (i in 1:num_plots) {
       # Get the i,j matrix positions of the regions that contain this subplot
       matchidx <- as.data.table(which(layout == i, arr.ind = TRUE))
 
       print(
         plots[[i]], vp = grid::viewport(
           layout.pos.row = matchidx$row,
@@ -1,66 +1,66 @@
 #' Importance of features in a model.
-#'
+#' 
 #' Creates a \code{data.table} of feature importances in a model.
-#'
+#' 
 #' @param feature_names character vector of feature names. If the model already
 #'   contains feature names, those would be used when \code{feature_names=NULL} (default value).
 #'   Non-null \code{feature_names} could be provided to override those in the model.
 #' @param model object of class \code{xgb.Booster}.
 #' @param trees (only for the gbtree booster) an integer vector of tree indices that should be included
 #'   into the importance calculation. If set to \code{NULL}, all trees of the model are parsed.
-#'   It could be useful, e.g., in multiclass classification to get feature importances
+#'   It could be useful, e.g., in multiclass classification to get feature importances 
 #'   for each class separately. IMPORTANT: the tree index in xgboost models
 #'   is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).
 #' @param data deprecated.
 #' @param label deprecated.
 #' @param target deprecated.
-#'
-#' @details
-#'
+#' 
+#' @details 
+#' 
 #' This function works for both linear and tree models.
-#'
-#' For linear models, the importance is the absolute magnitude of linear coefficients.
-#' For that reason, in order to obtain a meaningful ranking by importance for a linear model,
-#' the features need to be on the same scale (which you also would want to do when using either
+#' 
+#' For linear models, the importance is the absolute magnitude of linear coefficients. 
+#' For that reason, in order to obtain a meaningful ranking by importance for a linear model, 
+#' the features need to be on the same scale (which you also would want to do when using either 
 #' L1 or L2 regularization).
-#'
-#' @return
-#'
+#' 
+#' @return 
+#' 
 #' For a tree model, a \code{data.table} with the following columns:
 #' \itemize{
 #'   \item \code{Features} names of the features used in the model;
 #'   \item \code{Gain} represents fractional contribution of each feature to the model based on
-#'        the total gain of this feature's splits. Higher percentage means a more important
+#'        the total gain of this feature's splits. Higher percentage means a more important 
 #'        predictive feature.
 #'   \item \code{Cover} metric of the number of observation related to this feature;
 #'   \item \code{Frequency} percentage representing the relative number of times
 #'        a feature have been used in trees.
 #' }
-#'
+#' 
 #' A linear model's importance \code{data.table} has the following columns:
 #' \itemize{
 #'   \item \code{Features} names of the features used in the model;
 #'   \item \code{Weight} the linear coefficient of this feature;
 #'   \item \code{Class} (only for multiclass models) class label.
 #' }
-#'
-#' If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names},
+#' 
+#' If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names}, 
 #' index of the features will be used instead. Because the index is extracted from the model dump
 #' (based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R).
-#'
+#' 
 #' @examples
-#'
+#' 
 #' # binomial classification using gbtree:
 #' data(agaricus.train, package='xgboost')
-#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
+#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, 
 #'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
 #' xgb.importance(model = bst)
-#'
+#' 
 #' # binomial classification using gblinear:
-#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, booster = "gblinear",
+#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, booster = "gblinear", 
 #'                eta = 0.3, nthread = 1, nrounds = 20, objective = "binary:logistic")
 #' xgb.importance(model = bst)
-#'
+#' 
 #' # multiclass classification using gbtree:
 #' nclass <- 3
 #' nrounds <- 10
@@ -73,7 +73,7 @@
 #' xgb.importance(model = mbst, trees = seq(from=0, by=nclass, length.out=nrounds))
 #' xgb.importance(model = mbst, trees = seq(from=1, by=nclass, length.out=nrounds))
 #' xgb.importance(model = mbst, trees = seq(from=2, by=nclass, length.out=nrounds))
-#'
+#' 
 #' # multiclass classification using gblinear:
 #' mbst <- xgboost(data = scale(as.matrix(iris[, -5])), label = as.numeric(iris$Species) - 1,
 #'                 booster = "gblinear", eta = 0.2, nthread = 1, nrounds = 15,
@@ -83,33 +83,33 @@
 #' @export
 xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
                            data = NULL, label = NULL, target = NULL){
 
   if (!(is.null(data) && is.null(label) && is.null(target)))
     warning("xgb.importance: parameters 'data', 'label' and 'target' are deprecated")
 
   if (!inherits(model, "xgb.Booster"))
     stop("model: must be an object of class xgb.Booster")
 
   if (is.null(feature_names) && !is.null(model$feature_names))
     feature_names <- model$feature_names
 
   if (!(is.null(feature_names) || is.character(feature_names)))
     stop("feature_names: Has to be a character vector")
 
   model_text_dump <- xgb.dump(model = model, with_stats = TRUE)
 
   # linear model
-  if (model_text_dump[2] == "bias:"){
+  if(model_text_dump[2] == "bias:"){
     weights <- which(model_text_dump == "weight:") %>%
       {model_text_dump[(. + 1):length(model_text_dump)]} %>%
       as.numeric
 
     num_class <- NVL(model$params$num_class, 1)
-    if (is.null(feature_names))
+    if(is.null(feature_names))
       feature_names <- seq(to = length(weights) / num_class) - 1
     if (length(feature_names) * num_class != length(weights))
       stop("feature_names length does not match the number of features used in the model")
 
     result <- if (num_class == 1) {
       data.table(Feature = feature_names, Weight = weights)[order(-abs(Weight))]
     } else {
@@ -117,17 +117,18 @@ xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
                  Weight = weights,
                  Class = seq_len(num_class) - 1)[order(Class, -abs(Weight))]
     }
-  } else { # tree model
-    result <- xgb.model.dt.tree(feature_names = feature_names,
-                                text = model_text_dump,
-                                trees = trees)[
-      Feature != "Leaf", .(Gain = sum(Quality),
-                           Cover = sum(Cover),
-                           Frequency = .N), by = Feature][
-        , `:=`(Gain = Gain / sum(Gain),
-               Cover = Cover / sum(Cover),
-               Frequency = Frequency / sum(Frequency))][
-          order(Gain, decreasing = TRUE)]
+  } else {
+    # tree model
+    result <- xgb.model.dt.tree(feature_names = feature_names,
+                                text = model_text_dump,
+                                trees = trees)[
+      Feature != "Leaf", .(Gain = sum(Quality),
+                           Cover = sum(Cover),
+                           Frequency = .N), by = Feature][
+        ,`:=`(Gain = Gain / sum(Gain),
+              Cover = Cover / sum(Cover),
+              Frequency = Frequency / sum(Frequency))][
+          order(Gain, decreasing = TRUE)]
   }
   result
 }
@@ -1,30 +1,30 @@
 #' Load xgboost model from binary file
-#'
-#' Load xgboost model from the binary model file.
-#'
+#' 
+#' Load xgboost model from the binary model file.
+#' 
 #' @param modelfile the name of the binary input file.
-#'
-#' @details
+#' 
+#' @details 
 #' The input file is expected to contain a model saved in an xgboost-internal binary format
-#' using either \code{\link{xgb.save}} or \code{\link{cb.save.model}} in R, or using some
-#' appropriate methods from other xgboost interfaces. E.g., a model trained in Python and
+#' using either \code{\link{xgb.save}} or \code{\link{cb.save.model}} in R, or using some 
+#' appropriate methods from other xgboost interfaces. E.g., a model trained in Python and 
 #' saved from there in xgboost format, could be loaded from R.
-#'
+#' 
 #' Note: a model saved as an R-object, has to be loaded using corresponding R-methods,
 #' not \code{xgb.load}.
-#'
-#' @return
+#' 
+#' @return 
 #' An object of \code{xgb.Booster} class.
-#'
-#' @seealso
-#' \code{\link{xgb.save}}, \code{\link{xgb.Booster.complete}}.
-#'
+#' 
+#' @seealso 
+#' \code{\link{xgb.save}}, \code{\link{xgb.Booster.complete}}.
+#' 
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' data(agaricus.test, package='xgboost')
 #' train <- agaricus.train
 #' test <- agaricus.test
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, 
 #'                eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
 #' xgb.save(bst, 'xgb.model')
 #' bst <- xgb.load('xgb.model')
@@ -1,14 +0,0 @@
-#' Load serialised xgboost model from R's raw vector
-#'
-#' User can generate raw memory buffer by calling xgb.save.raw
-#'
-#' @param buffer the buffer returned by xgb.save.raw
-#'
-#' @export
-xgb.load.raw <- function(buffer) {
-  cachelist <- list()
-  handle <- .Call(XGBoosterCreate_R, cachelist)
-  .Call(XGBoosterLoadModelFromRaw_R, handle, buffer)
-  class(handle) <- "xgb.Booster.handle"
-  return (handle)
-}
@@ -1,12 +1,12 @@
|
||||
#' Parse a boosted tree model text dump
|
||||
#'
|
||||
#'
|
||||
#' Parse a boosted tree model text dump into a \code{data.table} structure.
|
||||
#'
|
||||
#'
|
||||
#' @param feature_names character vector of feature names. If the model already
|
||||
#' contains feature names, those would be used when \code{feature_names=NULL} (default value).
|
||||
#' Non-null \code{feature_names} could be provided to override those in the model.
|
||||
#' @param model object of class \code{xgb.Booster}
|
||||
#' @param text \code{character} vector previously generated by the \code{xgb.dump}
|
||||
#' @param text \code{character} vector previously generated by the \code{xgb.dump}
|
||||
#' function (where parameter \code{with_stats = TRUE} should have been set).
|
||||
#' \code{text} takes precedence over \code{model}.
|
||||
#' @param trees an integer vector of tree indices that should be parsed.
|
||||
@@ -18,11 +18,11 @@
|
||||
#' represented as integers (when FALSE) or as "Tree-Node" character strings (when FALSE).
|
||||
#' @param ... currently not used.
|
||||
#'
|
||||
#' @return
|
||||
#' @return
|
||||
#' A \code{data.table} with detailed information about model trees' nodes.
|
||||
#'
|
||||
#' The columns of the \code{data.table} are:
|
||||
#'
|
||||
#'
|
||||
#' \itemize{
|
||||
#' \item \code{Tree}: integer ID of a tree in a model (zero-based index)
|
||||
#' \item \code{Node}: integer ID of a node in a tree (zero-based index)
|
||||
@@ -36,79 +36,79 @@
|
||||
#' \item \code{Quality}: either the split gain (change in loss) or the leaf value
|
||||
#' \item \code{Cover}: metric related to the number of observation either seen by a split
|
||||
#' or collected by a leaf during training.
|
||||
#' }
|
||||
#'
|
||||
#' }
|
||||
#'
|
||||
#' When \code{use_int_id=FALSE}, columns "Yes", "No", and "Missing" point to model-wide node identifiers
|
||||
#' in the "ID" column. When \code{use_int_id=TRUE}, those columns point to node identifiers from
|
||||
#' in the "ID" column. When \code{use_int_id=TRUE}, those columns point to node identifiers from
|
||||
#' the corresponding trees in the "Node" column.
|
||||
#'
|
||||
#'
|
||||
#' @examples
|
||||
#' # Basic use:
|
||||
#'
|
||||
#'
|
||||
#' data(agaricus.train, package='xgboost')
|
||||
#'
|
||||
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
|
||||
#'
|
||||
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
|
||||
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
|
||||
#'
|
||||
#'
|
||||
#' (dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst))
|
||||
#'
|
||||
#' # This bst model already has feature_names stored with it, so those would be used when
|
||||
#'
|
||||
#' # This bst model already has feature_names stored with it, so those would be used when
|
||||
#' # feature_names is not set:
|
||||
#' (dt <- xgb.model.dt.tree(model = bst))
|
||||
#'
|
||||
#'
|
||||
#' # How to match feature names of splits that are following a current 'Yes' branch:
|
||||
#'
|
||||
#'
|
||||
#' merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)]
|
||||
#'
|
||||
#'
|
||||
#' @export
|
||||
xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
|
||||
trees = NULL, use_int_id = FALSE, ...){
|
||||
check.deprecation(...)
|
||||
|
||||
|
||||
if (!inherits(model, "xgb.Booster") && !is.character(text)) {
|
||||
stop("Either 'model' must be an object of class xgb.Booster\n",
|
||||
" or 'text' must be a character vector with the result of xgb.dump\n",
|
||||
" (or NULL if 'model' was provided).")
|
||||
}
|
||||
|
||||
|
||||
if (is.null(feature_names) && !is.null(model) && !is.null(model$feature_names))
|
||||
feature_names <- model$feature_names
|
||||
|
||||
|
||||
if (!(is.null(feature_names) || is.character(feature_names))) {
|
||||
stop("feature_names: must be a character vector")
|
||||
}
|
||||
|
||||
|
||||
if (!(is.null(trees) || is.numeric(trees))) {
|
||||
stop("trees: must be a vector of integers.")
|
||||
}
|
||||
|
||||
|
||||
if (is.null(text)){
|
||||
text <- xgb.dump(model = model, with_stats = TRUE)
|
||||
}
|
||||
|
||||
|
||||
if (length(text) < 2 ||
|
||||
sum(stri_detect_regex(text, 'yes=(\\d+),no=(\\d+)')) < 1) {
|
||||
stop("Non-tree model detected! This function can only be used with tree models.")
|
||||
}
|
||||
|
||||
|
||||
position <- which(!is.na(stri_match_first_regex(text, "booster")))
|
||||
|
||||
|
||||
add.tree.id <- function(node, tree) if (use_int_id) node else paste(tree, node, sep = "-")
|
||||
|
||||
|
||||
anynumber_regex <- "[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?"
|
||||
|
||||
|
||||
td <- data.table(t = text)
|
||||
td[position, Tree := 1L]
|
||||
td[, Tree := cumsum(ifelse(is.na(Tree), 0L, Tree)) - 1L]
|
||||
|
||||
|
||||
if (is.null(trees)) {
|
||||
trees <- 0:max(td$Tree)
|
||||
} else {
|
||||
trees <- trees[trees >= 0 & trees <= max(td$Tree)]
|
||||
}
|
||||
td <- td[Tree %in% trees & !grepl('^booster', t)]
|
||||
|
||||
td[, Node := stri_match_first_regex(t, "(\\d+):")[, 2] %>% as.integer]
|
||||
|
||||
td[, Node := stri_match_first_regex(t, "(\\d+):")[,2] %>% as.integer ]
|
||||
if (!use_int_id) td[, ID := add.tree.id(Node, Tree)]
|
||||
td[, isLeaf := !is.na(stri_match_first_regex(t, "leaf"))]
|
||||
|
||||
@@ -116,29 +116,29 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
  branch_rx <- paste0("f(\\d+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),",
                      "gain=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
  branch_cols <- c("Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
  td[isLeaf == FALSE,
     (branch_cols) := {
       # skip some indices with spurious capture groups from anynumber_regex
       xtr <- stri_match_first_regex(t, branch_rx)[, c(2, 3, 5, 6, 7, 8, 10), drop = FALSE]
       xtr[, 3:5] <- add.tree.id(xtr[, 3:5], Tree)
       lapply(seq_len(ncol(xtr)), function(i) xtr[, i])
     }]
  # assign feature_names when available
  if (!is.null(feature_names)) {
    if (length(feature_names) <= max(as.numeric(td$Feature), na.rm = TRUE))
      stop("feature_names has fewer elements than there are features used in the model")
    td[isLeaf == FALSE, Feature := feature_names[as.numeric(Feature) + 1]]
  }

  # parse leaf lines
  leaf_rx <- paste0("leaf=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
  leaf_cols <- c("Feature", "Quality", "Cover")
  td[isLeaf == TRUE,
     (leaf_cols) := {
       xtr <- stri_match_first_regex(t, leaf_rx)[, c(2, 4)]
       c("Leaf", lapply(seq_len(ncol(xtr)), function(i) xtr[, i]))
     }]

  # convert some columns to numeric
  numeric_cols <- c("Split", "Quality", "Cover")
  td[, (numeric_cols) := lapply(.SD, as.numeric), .SDcols = numeric_cols]
@@ -146,14 +146,14 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
    int_cols <- c("Yes", "No", "Missing")
    td[, (int_cols) := lapply(.SD, as.integer), .SDcols = int_cols]
  }

  td[, t := NULL]
  td[, isLeaf := NULL]

  td[order(Tree, Node)]
}

# Avoid error messages during CRAN check.
# The reason is that these variables are never declared.
# They are mainly column names inferred by data.table...
globalVariables(c("Tree", "Node", "ID", "Feature", "t", "isLeaf", ".SD", ".SDcols"))

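To make the parsing above concrete: a minimal sketch of how `branch_rx` picks fields out of one dump line. The sample line below is illustrative of the usual `xgb.dump(with_stats = TRUE)` format; it is not taken from this diff.

library(stringi)
anynumber_regex <- "[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?"
branch_rx <- paste0("f(\\d+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),",
                    "gain=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
# hypothetical branch line of a dump:
line <- "0:[f28<-9.53674e-07] yes=1,no=2,missing=1,gain=4000.53,cover=1628.25"
# columns 2,3,5,6,7,8,10 skip the spurious exponent capture groups inside anynumber_regex
stri_match_first_regex(line, branch_rx)[, c(2, 3, 5, 6, 7, 8, 10)]
# -> "28" "-9.53674e-07" "1" "2" "1" "4000.53" "1628.25"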
@@ -2,48 +2,48 @@
#'
#' Visualizes distributions related to depth of tree leafs.
#' \code{xgb.plot.deepness} uses base R graphics, while \code{xgb.ggplot.deepness} uses the ggplot backend.
#'
#' @param model either an \code{xgb.Booster} model generated by the \code{xgb.train} function
#' or a data.table result of the \code{xgb.model.dt.tree} function.
#' @param plot (base R barplot) whether a barplot should be produced.
#' If FALSE, only a data.table is returned.
#' @param which which distribution to plot (see details).
#' @param ... other parameters passed to \code{barplot} or \code{plot}.
#'
#' @details
#'
#' When \code{which="2x1"}, two distributions with respect to the leaf depth
#' are plotted on top of each other:
#' \itemize{
#'   \item the distribution of the number of leafs in a tree model at a certain depth;
#'   \item the distribution of the average weighted number of observations ("cover")
#'         ending up in leafs at a certain depth.
#' }
#' Those could be helpful in determining sensible ranges of the \code{max_depth}
#' and \code{min_child_weight} parameters.
#'
#' When \code{which="max.depth"} or \code{which="med.depth"}, plots of either maximum or median depth
#' per tree with respect to tree number are created. And \code{which="med.weight"} allows one to see how
#' a tree's median absolute leaf weight changes through the iterations.
#'
#' This function was inspired by the blog post
#' \url{https://github.com/aysent/random-forest-leaf-visualization}.
#'
#' @return
#'
#' Other than producing plots (when \code{plot=TRUE}), the \code{xgb.plot.deepness} function
#' silently returns a processed data.table where each row corresponds to a terminal leaf in a tree model,
#' and contains information about the leaf's depth, cover, and weight (which is used in calculating predictions).
#'
#' The \code{xgb.ggplot.deepness} function silently returns either a list of two ggplot graphs when \code{which="2x1"}
#' or a single ggplot graph for the other \code{which} options.
#'
#' @seealso
#'
#' \code{\link{xgb.train}}, \code{\link{xgb.model.dt.tree}}.
#'
#' @examples
#'
#' data(agaricus.train, package='xgboost')
#'
#' # Change max_depth to a higher number to get a more significant result
@@ -53,16 +53,16 @@
#'
#' xgb.plot.deepness(bst)
#' xgb.ggplot.deepness(bst)
#'
#' xgb.plot.deepness(bst, which='max.depth', pch=16, col=rgb(0,0,1,0.3), cex=2)
#'
#' xgb.plot.deepness(bst, which='med.weight', pch=16, col=rgb(0,0,1,0.3), cex=2)
#'
#' @rdname xgb.plot.deepness
#' @export
xgb.plot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med.depth", "med.weight"),
                              plot = TRUE, ...) {

  if (!(inherits(model, "xgb.Booster") || is.data.table(model)))
    stop("model: Has to be either an xgb.Booster model generated by the xgb.train function\n",
         "or a data.table result of the xgb.model.dt.tree function")
@@ -71,32 +71,32 @@ xgb.plot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med.d
    stop("igraph package is required for plotting the graph deepness.", call. = FALSE)

  which <- match.arg(which)

  dt_tree <- model
  if (inherits(model, "xgb.Booster"))
    dt_tree <- xgb.model.dt.tree(model = model)

  if (!all(c("Feature", "Tree", "ID", "Yes", "No", "Cover") %in% colnames(dt_tree)))
    stop("Model tree columns are not as expected!\n",
         "  Note that this function works only for tree models.")

  dt_depths <- merge(get.leaf.depth(dt_tree), dt_tree[, .(ID, Cover, Weight = Quality)], by = "ID")
  setkeyv(dt_depths, c("Tree", "ID"))
  # count by depth levels, and also calculate average cover at a depth
  dt_summaries <- dt_depths[, .(.N, Cover = mean(Cover)), Depth]
  setkey(dt_summaries, "Depth")

  if (plot) {
    if (which == "2x1") {
      op <- par(no.readonly = TRUE)
      par(mfrow = c(2, 1),
          oma = c(3, 1, 3, 1) + 0.1,
          mar = c(1, 4, 1, 0) + 0.1)

      dt_summaries[, barplot(N, border = NA, ylab = 'Number of leafs', ...)]

      dt_summaries[, barplot(Cover, border = NA, ylab = "Weighted cover", names.arg = Depth, ...)]

      title("Model complexity", xlab = "Leaf depth", outer = TRUE, line = 1)
      par(op)
    } else if (which == "max.depth") {
@@ -123,14 +123,14 @@ get.leaf.depth <- function(dt_tree) {
    dt_tree[Feature != "Leaf", .(ID, To = No, Tree)]
  ))
  # whether "To" is a leaf:
  dt_edges <-
    merge(dt_edges,
          dt_tree[Feature == "Leaf", .(ID, Leaf = TRUE)],
          all.x = TRUE, by.x = "To", by.y = "ID")
  dt_edges[is.na(Leaf), Leaf := FALSE]

  dt_edges[, {
    graph <- igraph::graph_from_data_frame(.SD[, .(ID, To)])
    # min(ID) in a tree is a root node
    paths_tmp <- igraph::shortest_paths(graph, from = min(ID), to = To[Leaf == TRUE])
    # list of paths to each leaf in a tree

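The idea behind get.leaf.depth, as a minimal hedged sketch (a hypothetical one-split tree; node IDs follow the "tree-node" convention used above):

library(igraph)
library(data.table)
# root 0-0 splits into two leaves, 0-1 and 0-2
dt_edges <- data.table(ID = c("0-0", "0-0"), To = c("0-1", "0-2"))
graph <- igraph::graph_from_data_frame(dt_edges)
paths <- igraph::shortest_paths(graph, from = "0-0", to = c("0-1", "0-2"))
# depth of each leaf = number of edges on the path from the root
sapply(paths$vpath, function(p) length(p) - 1)  # -> 1 1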
@@ -92,10 +92,10 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
    importance_matrix <- head(importance_matrix, top_n)
  }
  if (rel_to_first) {
    importance_matrix[, Importance := Importance / max(abs(Importance))]
  }
  if (is.null(cex)) {
    cex <- 2.5 / log2(1 + nrow(importance_matrix))
  }

  if (plot) {

@@ -9,7 +9,7 @@
#' @param plot_height height in pixels of the graph to produce
#' @param render a logical flag for whether the graph should be rendered (see Value).
#' @param ... currently not used
#'
#' @details
#'
#' This function tries to capture the complexity of a gradient boosted tree model
@@ -72,53 +72,53 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5,

  precedent.nodes <- root.nodes

  while (tree.matrix[, sum(is.na(abs.node.position))] > 0) {
    yes.row.nodes <- tree.matrix[abs.node.position %in% precedent.nodes & !is.na(Yes)]
    no.row.nodes <- tree.matrix[abs.node.position %in% precedent.nodes & !is.na(No)]
    yes.nodes.abs.pos <- yes.row.nodes[, abs.node.position] %>% paste0("_0")
    no.nodes.abs.pos <- no.row.nodes[, abs.node.position] %>% paste0("_1")

    tree.matrix[ID %in% yes.row.nodes[, Yes], abs.node.position := yes.nodes.abs.pos]
    tree.matrix[ID %in% no.row.nodes[, No], abs.node.position := no.nodes.abs.pos]
    precedent.nodes <- c(yes.nodes.abs.pos, no.nodes.abs.pos)
  }

  tree.matrix[!is.na(Yes), Yes := paste0(abs.node.position, "_0")]
  tree.matrix[!is.na(No), No := paste0(abs.node.position, "_1")]

  remove.tree <- . %>% stri_replace_first_regex(pattern = "^\\d+-", replacement = "")

  tree.matrix[, `:=`(abs.node.position = remove.tree(abs.node.position),
                     Yes = remove.tree(Yes),
                     No = remove.tree(No))]

  nodes.dt <- tree.matrix[
        , .(Quality = sum(Quality))
        , by = .(abs.node.position, Feature)
      ][, .(Text = paste0(Feature[1:min(length(Feature), features_keep)],
                          " (",
                          format(Quality[1:min(length(Quality), features_keep)], digits = 5),
                          ")") %>%
                   paste0(collapse = "\n"))
        , by = abs.node.position]

  edges.dt <- tree.matrix[Feature != "Leaf", .(abs.node.position, Yes)] %>%
    list(tree.matrix[Feature != "Leaf", .(abs.node.position, No)]) %>%
    rbindlist() %>%
    setnames(c("From", "To")) %>%
    .[, .N, .(From, To)] %>%
    .[, N := NULL]

  nodes <- DiagrammeR::create_node_df(
    n = nrow(nodes.dt),
    label = nodes.dt[, Text]
  )

  edges <- DiagrammeR::create_edge_df(
    from = match(edges.dt[, From], nodes.dt[, abs.node.position]),
    to = match(edges.dt[, To], nodes.dt[, abs.node.position]),
    rel = "leading_to")

  graph <- DiagrammeR::create_graph(
    nodes_df = nodes,
    edges_df = edges,

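The aggregation step above superimposes nodes from all trees that share the same position path; a toy sketch with hypothetical values (not from the source):

library(data.table)
# per-node rows from two trees, already reduced to position paths
tree.matrix <- data.table(
  abs.node.position = c("0", "0_0", "0_1", "0", "0_0", "0_1"),
  Feature = c("f1", "Leaf", "Leaf", "f1", "Leaf", "Leaf"),
  Quality = c(10, 1, 2, 20, 3, 4))
tree.matrix[, .(Quality = sum(Quality)), by = .(abs.node.position, Feature)]
# f1 at the root accumulates 30; each leaf position sums its two trees' values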
@@ -125,12 +125,12 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,

  nsample <- if (is.null(subsample)) min(100000, nrow(data)) else as.integer(subsample * nrow(data))
  idx <- sample(1:nrow(data), nsample)
  data <- data[idx, ]

  if (is.null(shap_contrib)) {
    shap_contrib <- predict(model, data, predcontrib = TRUE, approxcontrib = approxcontrib)
  } else {
    shap_contrib <- shap_contrib[idx, ]
  }

  which <- match.arg(which)
@@ -168,8 +168,8 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,

  if (plot && which == "1d") {
    op <- par(mfrow = c(ceiling(length(features) / n_col), n_col),
              oma = c(0, 0, 0, 0) + 0.2,
              mar = c(3.5, 3.5, 0, 0) + 0.1,
              mgp = c(1.7, 0.6, 0))
    for (f in cols) {
      ord <- order(data[, f])
@@ -192,7 +192,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
      grid()
      if (plot_loess) {
        # compress x to 3 digits, and mean-aggregate y
        zz <- data.table(x = signif(x, 3), y)[, .(.N, y = mean(y)), x]
        if (nrow(zz) <= 5) {
          lines(zz$x, zz$y, col = col_loess)
        } else {

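What the "compress x ... and mean-aggregate y" step does, in isolation (a hedged sketch with made-up values):

library(data.table)
x <- c(0.12345, 0.12349, 0.500)  # hypothetical feature values
y <- c(1, 3, 5)                  # hypothetical SHAP contributions
data.table(x = signif(x, 3), y)[, .(.N, y = mean(y)), x]
# the two x values that round to 0.123 collapse into one row with y = 2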
@@ -1,7 +1,7 @@
#' Plot a boosted tree model
#'
#' Read a tree model text dump and plot the model.
#'
#' @param feature_names names of each feature as a \code{character} vector.
#' @param model produced by the \code{xgb.train} function.
#' @param trees an integer vector of tree indices that should be visualized.
@@ -14,10 +14,10 @@
#' @param show_node_id a logical flag for whether to show node id's in the graph.
#' @param ... currently not used.
#'
#' @details
#'
#' The content of each node is organised as follows:
#'
#' \itemize{
#'   \item Feature name.
#'   \item \code{Cover}: The sum of second order gradients of training data classified to the leaf.
@@ -27,21 +27,21 @@
#'   \item \code{Gain} (for split nodes): the information gain metric of a split
#'         (corresponds to the importance of the node in the model).
#'   \item \code{Value} (for leafs): the margin value that the leaf may contribute to prediction.
#' }
#' The tree root nodes also indicate the Tree index (0-based).
#'
#' The "Yes" branches are marked by the "< split_value" label.
#' The branches that are also used for missing values are marked as bold
#' (as in "carrying extra capacity").
#'
#' This function uses \href{http://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
#'
#' @return
#'
#' When \code{render = TRUE}:
#' returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}.
#' Similar to ggplot objects, it needs to be printed to be seen when not running from the command line.
#'
#' When \code{render = FALSE}:
#' silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}.
#' This could be useful if one wants to modify some of the graph attributes
@@ -49,23 +49,23 @@
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' # plot all the trees
#' xgb.plot.tree(model = bst)
#' # plot only the first tree and display the node ID:
#' xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE)
#'
#' \dontrun{
#' # Below is an example of how to save this plot to a file.
#' # Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed.
#' library(DiagrammeR)
#' gr <- xgb.plot.tree(model=bst, trees=0:1, render=FALSE)
#' export_graph(gr, 'tree.pdf', width=1500, height=1900)
#' export_graph(gr, 'tree.png', width=1500, height=1900)
#' }
#'
#' @export
xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot_width = NULL, plot_height = NULL,
                          render = TRUE, show_node_id = FALSE, ...) {
@@ -77,18 +77,18 @@ xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot
  if (!requireNamespace("DiagrammeR", quietly = TRUE)) {
    stop("DiagrammeR package is required for xgb.plot.tree", call. = FALSE)
  }

  dt <- xgb.model.dt.tree(feature_names = feature_names, model = model, trees = trees)

  dt[, label := paste0(Feature, "\nCover: ", Cover, ifelse(Feature == "Leaf", "\nValue: ", "\nGain: "), Quality)]
  if (show_node_id)
    dt[, label := paste0(ID, ": ", label)]
  dt[Node == 0, label := paste0("Tree ", Tree, "\n", label)]
  dt[, shape := "rectangle"][Feature == "Leaf", shape := "oval"]
  dt[, filledcolor := "Beige"][Feature == "Leaf", filledcolor := "Khaki"]
  # in order to draw the first tree on top:
  dt <- dt[order(-Tree)]

  nodes <- DiagrammeR::create_node_df(
    n = nrow(dt),
    ID = dt$ID,
@@ -97,7 +97,7 @@ xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot
    shape = dt$shape,
    data = dt$Feature,
    fontcolor = "black")

  edges <- DiagrammeR::create_edge_df(
    from = match(dt[Feature != "Leaf", c(ID)] %>% rep(2), dt$ID),
    to = match(dt[Feature != "Leaf", c(Yes, No)], dt$ID),
@@ -126,9 +126,9 @@ xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot
    attr_type = "edge",
    attr = c("color", "arrowsize", "arrowhead", "fontname"),
    value = c("DimGray", "1.5", "vee", "Helvetica"))

  if (!render) return(invisible(graph))

  DiagrammeR::render_graph(graph, width = plot_width, height = plot_height)
}

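The from/to construction above pairs each split row with its two children by matching child IDs back to row positions; a toy sketch (hypothetical IDs, not from the source):

ids <- c("0-0", "0-1", "0-2")           # dt$ID in row order
split_ids <- "0-0"                      # IDs of the non-leaf rows
yes <- "0-1"; no <- "0-2"               # their Yes/No children
from <- match(rep(split_ids, 2), ids)   # -> 1 1
to   <- match(c(yes, no), ids)          # -> 2 3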
@@ -1,33 +1,29 @@
#' Save xgboost model to binary file
#'
#' Save xgboost model to a file in binary format.
#'
#' @param model model object of \code{xgb.Booster} class.
#' @param fname name of the file to write.
#'
#' @details
#' This method allows saving a model in an xgboost-internal binary format which is universal
#' among the various xgboost interfaces. In R, the saved model file could be read in later
#' using either the \code{\link{xgb.load}} function or the \code{xgb_model} parameter
#' of \code{\link{xgb.train}}.
#'
#' Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{readRDS}}
#' or \code{\link[base]{save}}). However, it would then only be compatible with R, and
#' corresponding R-methods would need to be used to load it. Moreover, persisting the model with
#' \code{\link[base]{readRDS}} or \code{\link[base]{save}} will cause compatibility problems in
#' future versions of XGBoost. Consult \code{\link{a-compatibility-note-for-saveRDS-save}} to learn
#' how to persist models in a future-proof way, i.e. to make the model accessible in future
#' releases of XGBoost.
#'
#' @seealso
#' \code{\link{xgb.load}}, \code{\link{xgb.Booster.complete}}.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' xgb.save(bst, 'xgb.model')
#' bst <- xgb.load('xgb.model')

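To make the note in the details section concrete, a minimal sketch contrasting the two persistence routes (assumes a fitted booster `bst` as in the example above):

# future-proof, cross-interface binary format (recommended):
xgb.save(bst, "xgb.model")
bst2 <- xgb.load("xgb.model")

# R-only object serialization; may break across XGBoost releases:
saveRDS(bst, "bst.rds")
bst3 <- readRDS("bst.rds")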
@@ -1,23 +1,23 @@
#' Save xgboost model to R's raw vector;
#' user can call xgb.load.raw to load the model back from the raw vector
#'
#' Save xgboost model from xgboost or xgb.train
#'
#' @param model the model object.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' raw <- xgb.save.raw(bst)
#' bst <- xgb.load.raw(raw)
#' pred <- predict(bst, test$data)
#'
#' @export
xgb.save.raw <- function(model) {
  handle <- xgb.get.handle(model)
  .Call(XGBoosterModelToRaw_R, handle)
}

@@ -1,21 +0,0 @@
#' Serialize the booster instance into R's raw vector. The serialization method differs
#' from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
#' parameters. This serialization format is not stable across different xgboost versions.
#'
#' @param booster the booster instance
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' raw <- xgb.serialize(bst)
#' bst <- xgb.unserialize(raw)
#'
#' @export
xgb.serialize <- function(booster) {
  handle <- xgb.get.handle(booster)
  .Call(XGBoosterSerializeToBuffer_R, handle)
}
@@ -3,9 +3,9 @@
#' \code{xgb.train} is an advanced interface for training an xgboost model.
#' The \code{xgboost} function is a simpler wrapper for \code{xgb.train}.
#'
#' @param params the list of parameters. The complete list of parameters is
#'   available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
#'   is a shorter summary:
#'
#' 1. General Parameters
#'
@@ -43,23 +43,13 @@
#' \item \code{objective} specify the learning task and the corresponding learning objective; users can pass a self-defined function to it. The default objective options are below (see the short params sketch after this list):
#'   \itemize{
#'     \item \code{reg:squarederror} Regression with squared loss (Default).
#'     \item \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. All inputs are required to be greater than -1. Also, see metric rmsle for a possible issue with this objective.
#'     \item \code{reg:logistic} logistic regression.
#'     \item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
#'     \item \code{binary:logistic} logistic regression for binary classification. Output probability.
#'     \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
#'     \item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
#'     \item \code{count:poisson}: poisson regression for count data, output mean of poisson distribution. \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).
#'     \item \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function \code{h(t) = h0(t) * HR}).
#'     \item \code{survival:aft}: Accelerated failure time model for censored survival time data. See \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} for details.
#'     \item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and the \code{aft-nloglik} metric.
#'     \item \code{num_class} set the number of classes. To use only with multiclass objectives.
#'     \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
#'     \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to an ndata-by-nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
#'     \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
#'     \item \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.
#'     \item \code{rank:map}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} is maximized.
#'     \item \code{reg:gamma}: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.
#'     \item \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.
#'   }
#' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
#' \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: the metric will be assigned according to the objective (rmse for regression, error for classification, mean average precision for ranking). A list is provided in the details section.
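For orientation, a minimal sketch of assembling such a params list (the parameter names are real xgboost parameters; the values are illustrative only):

param <- list(booster = "gbtree",
              objective = "binary:logistic",
              eta = 0.1,
              max_depth = 4,
              eval_metric = "auc")
# bst <- xgb.train(params = param, data = dtrain, nrounds = 50)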
@@ -277,8 +267,8 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
  }

  # evaluation printing callback
  params <- c(params)
  print_every_n <- max(as.integer(print_every_n), 1L)
  if (!has.callbacks(callbacks, 'cb.print.evaluation') &&
      verbose) {
    callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n))
@@ -301,10 +291,8 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
    callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
                                                 maximize = maximize, verbose = verbose))
  }

  # Sort the callbacks into categories
  cb <- categorize.callbacks(callbacks)
  params['validate_parameters'] <- TRUE
  if (!is.null(params[['seed']])) {
    warning("xgb.train: `seed` is ignored in R package. Use `set.seed()` instead.")
  }
@@ -328,9 +316,12 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
      niter_init <- xgb.ntree(bst) %/% (num_parallel_tree * num_class)
    }
  }
  if (is_update && nrounds > niter_init)
    stop("nrounds cannot be larger than ", niter_init, " (nrounds of xgb_model)")

  # TODO: distributed code
  rank <- 0

  niter_skip <- ifelse(is_update, 0, niter_init)
  begin_iteration <- niter_skip + 1
  end_iteration <- niter_skip + nrounds
@@ -342,6 +333,7 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),

    xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)

    bst_evaluation <- numeric(0)
    if (length(watchlist) > 0)
      bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval)

@@ -356,7 +348,7 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
  bst <- xgb.Booster.complete(bst, saveraw = TRUE)

  # store the total number of boosting iterations
  bst$niter <- end_iteration

  # store the evaluation results
  if (length(evaluation_log) > 0 &&

@@ -1,31 +0,0 @@
#' Load the instance back from \code{\link{xgb.serialize}}
#'
#' @param buffer the buffer containing booster instance saved by \code{\link{xgb.serialize}}
#'
#' @export
xgb.unserialize <- function(buffer) {
  cachelist <- list()
  handle <- .Call(XGBoosterCreate_R, cachelist)
  tryCatch(
    .Call(XGBoosterUnserializeFromBuffer_R, handle, buffer),
    error = function(e) {
      error_msg <- conditionMessage(e)
      m <- regexec("(src[\\\\/]learner.cc:[0-9]+): Check failed: (header == serialisation_header_)",
                   error_msg, perl = TRUE)
      groups <- regmatches(error_msg, m)[[1]]
      if (length(groups) == 3) {
        warning(paste("The model had been generated by XGBoost version 1.0.0 or earlier and was ",
                      "loaded from an RDS file. We strongly ADVISE AGAINST using saveRDS() ",
                      "function, to ensure that your model can be read in current and upcoming ",
                      "XGBoost releases. Please use xgb.save() instead to preserve models for the ",
                      "long term. For more details and explanation, see ",
                      "https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html",
                      sep = ""))
        .Call(XGBoosterLoadModelFromRaw_R, handle, buffer)
      } else {
        stop(e)
      }
    })
  class(handle) <- "xgb.Booster.handle"
  return(handle)
}
R-package/configure (vendored)
@@ -613,7 +613,6 @@ infodir
docdir
oldincludedir
includedir
runstatedir
localstatedir
sharedstatedir
sysconfdir
@@ -683,7 +682,6 @@ datadir='${datarootdir}'
sysconfdir='${prefix}/etc'
sharedstatedir='${prefix}/com'
localstatedir='${prefix}/var'
runstatedir='${localstatedir}/run'
includedir='${prefix}/include'
oldincludedir='/usr/include'
docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
@@ -936,15 +934,6 @@ do
  | -silent | --silent | --silen | --sile | --sil)
    silent=yes ;;

  -runstatedir | --runstatedir | --runstatedi | --runstated \
  | --runstate | --runstat | --runsta | --runst | --runs \
  | --run | --ru | --r)
    ac_prev=runstatedir ;;
  -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
  | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
  | --run=* | --ru=* | --r=*)
    runstatedir=$ac_optarg ;;

  -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
    ac_prev=sbindir ;;
  -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
@@ -1082,7 +1071,7 @@ fi
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
              datadir sysconfdir sharedstatedir localstatedir includedir \
              oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
              libdir localedir mandir
do
  eval ac_val=\$$ac_var
  # Remove trailing slashes.
@@ -1235,7 +1224,6 @@ Fine tuning of the installation directories:
  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
  --libdir=DIR            object code libraries [EPREFIX/lib]
  --includedir=DIR        C header files [PREFIX/include]
  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
@@ -2710,7 +2698,7 @@ fi
if test `uname -s` = "Darwin"
then
  OPENMP_CXXFLAGS='-Xclang -fopenmp'
  OPENMP_LIB='-lomp'
  ac_pkg_openmp=no
  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether OpenMP will work in a package" >&5
$as_echo_n "checking whether OpenMP will work in a package... " >&6; }
@@ -2725,7 +2713,7 @@ main ()
  return 0;
}
_ACEOF
  ${CC} -o conftest conftest.c ${OPENMP_LIB} ${OPENMP_CXXFLAGS} 2>/dev/null && ./conftest && ac_pkg_openmp=yes
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${ac_pkg_openmp}" >&5
$as_echo "${ac_pkg_openmp}" >&6; }
  if test "${ac_pkg_openmp}" = no; then

@@ -1,6 +1,6 @@
### configure.ac -*- Autoconf -*-

AC_PREREQ(2.69)

AC_INIT([xgboost],[0.6-3],[],[xgboost],[])

@@ -29,11 +29,11 @@ fi
if test `uname -s` = "Darwin"
then
  OPENMP_CXXFLAGS='-Xclang -fopenmp'
  OPENMP_LIB='-lomp'
  ac_pkg_openmp=no
  AC_MSG_CHECKING([whether OpenMP will work in a package])
  AC_LANG_CONFTEST([AC_LANG_PROGRAM([[#include <omp.h>]], [[ return (omp_get_max_threads() <= 1); ]])])
  ${CC} -o conftest conftest.c ${OPENMP_LIB} ${OPENMP_CXXFLAGS} 2>/dev/null && ./conftest && ac_pkg_openmp=yes
  AC_MSG_RESULT([${ac_pkg_openmp}])
  if test "${ac_pkg_openmp}" = no; then
    OPENMP_CXXFLAGS=''

@@ -17,4 +17,4 @@ Benchmarks
Notes
=====
* Contributions of examples and benchmarks are more than welcome!
* If you would like to share how you use xgboost to solve your problem, send a pull request :)

@@ -3,8 +3,8 @@ require(methods)

# we load in the agaricus dataset
# In this example, we are aiming to predict whether a mushroom is edible
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test
# the loaded data is stored in sparseMatrix, and label is a numeric vector in {0,1}
@@ -26,7 +26,7 @@ bst <- xgboost(data = as.matrix(train$data), label = train$label, max_depth = 2,
# you can also put in an xgb.DMatrix object, which stores label, data and other metadata needed for advanced features
print("Training xgboost with xgb.DMatrix")
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, nthread = 2,
               objective = "binary:logistic")

# Verbose = 0,1,2
@@ -46,7 +46,7 @@ bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,

#--------------------basic prediction using xgboost--------------
# you can do prediction using the following line
# you can put in Matrix, sparseMatrix, or xgb.DMatrix
pred <- predict(bst, test$data)
err <- mean(as.numeric(pred > 0.5) != test$label)
print(paste("test-error=", err))
@@ -58,31 +58,31 @@ xgb.save(bst, "xgboost.model")
bst2 <- xgb.load("xgboost.model")
pred2 <- predict(bst2, test$data)
# pred2 should be identical to pred
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2 - pred))))

# save model to R's raw vector
raw <- xgb.save.raw(bst)
# load binary model to R
bst3 <- xgb.load(raw)
pred3 <- predict(bst3, test$data)
# pred3 should be identical to pred
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3 - pred))))

#----------------Advanced features --------------
# to use advanced features, we need to put data in xgb.DMatrix
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
dtest <- xgb.DMatrix(data = test$data, label = test$label)
#---------------Using watchlist----------------
# watchlist is a list of xgb.DMatrix, each of them tagged with a name
watchlist <- list(train = dtrain, test = dtest)
# to train with a watchlist, use xgb.train, which contains more advanced features
# the watchlist allows us to monitor the evaluation result on all data in the list
print("Train xgboost using xgb.train with watchlist")
bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
                 nthread = 2, objective = "binary:logistic")
# we can change evaluation metrics, or use multiple evaluation metrics
print("train xgboost using xgb.train with watchlist, watch logloss and error")
bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
                 eval_metric = "error", eval_metric = "logloss",
                 nthread = 2, objective = "binary:logistic")

@@ -90,17 +90,17 @@ bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, watchlist =
xgb.DMatrix.save(dtrain, "dtrain.buffer")
# to load it in, simply call xgb.DMatrix
dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(data = dtrain2, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
                 nthread = 2, objective = "binary:logistic")
# information can be extracted from xgb.DMatrix using getinfo
label <- getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label)) / length(label)
print(paste("test-error=", err))

# You can dump the tree you learned using xgb.dump into a text file
dump_path <- file.path(tempdir(), 'dump.raw.txt')
xgb.dump(bst, dump_path, with_stats = TRUE)

# Finally, you can check which features are the most important.
print("Most important features (look at column Gain):")

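The hunk ends just before the importance call; a typical continuation would look like the following sketch (an assumption, not the verbatim demo code):

imp <- xgb.importance(feature_names = colnames(train$data), model = bst)
print(head(imp))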
@@ -1,7 +1,7 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)

@@ -11,12 +11,12 @@ watchlist <- list(eval = dtest, train = dtrain)
#
print('start running example to start from an initial prediction')
# train xgboost for 1 round
param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')
bst <- xgb.train(param, dtrain, 1, watchlist)
# Note: we need the margin value instead of the transformed prediction in set_base_margin
# predicting with output_margin=TRUE will always give you margin values before the logistic transformation
ptrain <- predict(bst, dtrain, outputmargin = TRUE)
ptest <- predict(bst, dtest, outputmargin = TRUE)
# set the base_margin property of dtrain and dtest
# base margin is the base prediction we will boost from
setinfo(dtrain, "base_margin", ptrain)

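To make the margin/probability relationship concrete (a short sketch; binary:logistic applies the logistic transform to the margin):

p <- predict(bst, dtest)                       # probabilities
m <- predict(bst, dtest, outputmargin = TRUE)  # raw margins
all.equal(p, 1 / (1 + exp(-m)))                # TRUE up to numerical error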
@@ -1,5 +1,5 @@
# install development version of caret library that contains xgboost models
devtools::install_github("topepo/caret/pkg/caret")
require(caret)
require(xgboost)
require(data.table)
@@ -9,17 +9,17 @@ require(e1071)
# Load Arthritis dataset in memory.
data(Arthritis)
# Create a copy of the dataset with the data.table package (data.table is 100% compliant with R dataframes but its syntax is a lot more consistent and its performance is really good).
df <- data.table(Arthritis, keep.rownames = FALSE)

# Let's add some new categorical features to see if it helps. Of course these features are highly correlated to the Age feature. Usually it's not a good thing in ML, but tree algorithms (including boosted trees) are able to select the best features, even in case of highly correlated features.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to a factor (categorical data) so the algorithm treats them as independent values.
df[, AgeDiscret := as.factor(round(Age / 10, 0))]

# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I chose this value based on nothing. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]

# We remove ID as there is nothing to learn from this feature (it would just add some noise as the dataset is small).
df[, ID := NULL]

#-------------Basic Training using XGBoost in caret Library-----------------
# Set up control parameters for caret::train

@@ -6,10 +6,10 @@ if (!require(vcd)) {
  require(vcd)
}
# According to its documentation, XGBoost works only on numbers.
# Sometimes the dataset we have to work on has categorical data.
# A categorical variable is one which has a fixed number of values. For example, if for each observation a variable called "Colour" can have only "red", "blue" or "green" as value, it is a categorical variable.
#
# In R, a categorical variable is called a factor.
# Type ?factor in the console for more information.
#
# In this demo we will see how to transform a dense dataframe with categorical variables to a sparse matrix before analyzing it in XGBoost.
@@ -19,7 +19,7 @@ if (!require(vcd)) {
data(Arthritis)

# create a copy of the dataset with the data.table package (data.table is 100% compliant with R dataframes but its syntax is a lot more consistent and its performance is really good).
df <- data.table(Arthritis, keep.rownames = FALSE)

# Let's have a look at the data.table
cat("Print the dataset\n")
@@ -32,17 +32,17 @@ str(df)
# Let's add some new categorical features to see if it helps. Of course these features are highly correlated to the Age feature. Usually it's not a good thing in ML, but tree algorithms (including boosted trees) are able to select the best features, even in case of highly correlated features.

# For the first feature we create groups of age by rounding the real age. Note that we transform it to a factor (categorical data) so the algorithm treats them as independent values.
df[, AgeDiscret := as.factor(round(Age / 10, 0))]

# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I chose this value based on nothing. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]

# We remove ID as there is nothing to learn from this feature (it would just add some noise as the dataset is small).
df[, ID := NULL]

# List the different values for the column Treatment: Placebo, Treated.
cat("Values of the categorical feature Treatment\n")
print(levels(df[, Treatment]))

# Next step, we will transform the categorical data to dummy variables.
# This method is also called one-hot encoding.
@@ -52,16 +52,16 @@ print(levels(df[, Treatment]))
#
# The formula Improved ~ . - 1 used below means: transform all categorical features but column Improved to binary values.
# Column Improved is excluded because it will be our output column, the one we want to predict.
sparse_matrix <- sparse.model.matrix(Improved ~ . - 1, data = df)

cat("Encoding of the sparse Matrix\n")
print(sparse_matrix)

# Create the output vector (not sparse)
# 1. Set, for all rows, the field in the Y column to 0;
# 2. set Y to 1 when Improved == Marked;
# 3. Return the Y column
output_vector <- df[, Y := 0][Improved == "Marked", Y := 1][, Y]

# What follows is the same process as in the other demos
cat("Learning...\n")

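A tiny sketch of what sparse.model.matrix does to a factor (a hypothetical two-level example, not the Arthritis data):

library(Matrix)
d <- data.frame(Colour = factor(c("red", "blue", "red")), y = c(1, 0, 1))
sparse.model.matrix(y ~ . - 1, data = d)
# a 3 x 2 sparse matrix with 0/1 indicator columns Colourblue and Colourred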
@@ -1,25 +1,25 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)

nrounds <- 2
param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')

cat('running cross validation\n')
# do cross validation, this will print the result out as
# [iteration]  metric_name:mean_value+std_value
# std_value is the standard deviation of the metric
xgb.cv(param, dtrain, nrounds, nfold = 5, metrics = {'error'})

cat('running cross validation, disable standard deviation display\n')
# do cross validation, this will print the result out as
# [iteration]  metric_name:mean_value+std_value
# std_value is the standard deviation of the metric
xgb.cv(param, dtrain, nrounds, nfold = 5,
       metrics = 'error', showsd = FALSE)

###
# you can also do cross validation with a customized loss function
@@ -29,18 +29,18 @@ print ('running cross validation, with cutomsized loss function')

logregobj <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  preds <- 1 / (1 + exp(-preds))
  grad <- preds - labels
  hess <- preds * (1 - preds)
  return(list(grad = grad, hess = hess))
}
evalerror <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
  return(list(metric = "error", value = err))
}

param <- list(max_depth = 2, eta = 1,
              objective = logregobj, eval_metric = evalerror)
# train with customized objective
xgb.cv(params = param, data = dtrain, nrounds = nrounds, nfold = 5)

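Why grad and hess take this form: a standard derivation for the logistic loss, supplied here for reference (it is not part of the demo itself).

L(m) = -\bigl[\,y \log p + (1-y)\log(1-p)\,\bigr], \qquad p = \sigma(m) = \frac{1}{1+e^{-m}}

\frac{\partial L}{\partial m} = p - y \quad (\text{the grad above}), \qquad
\frac{\partial^2 L}{\partial m^2} = p\,(1-p) \quad (\text{the hess above})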
@@ -1,7 +1,7 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)

@@ -15,7 +15,7 @@ num_round <- 2
# this is loglikelihood loss
logregobj <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  preds <- 1 / (1 + exp(-preds))
  grad <- preds - labels
  hess <- preds * (1 - preds)
  return(list(grad = grad, hess = hess))
@@ -29,36 +29,36 @@ logregobj <- function(preds, dtrain) {
# Keep this in mind when you use the customization, and maybe you need to write a customized evaluation function
evalerror <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
  return(list(metric = "error", value = err))
}

param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0,
              objective = logregobj, eval_metric = evalerror)
print('start training with user customized objective')
# training with customized objective; we can also do step-by-step training
# simply look at xgboost.py's implementation of train
bst <- xgb.train(param, dtrain, num_round, watchlist)

#
# there can be cases where you want additional information
# being considered besides the properties of DMatrix you can get by getinfo
# you can set additional information as attributes of DMatrix

# set the label attribute of dtrain to be label; we use label as an example, it can be anything
attr(dtrain, 'label') <- getinfo(dtrain, 'label')
# this is a new customized objective, where you can access things you set
# the same thing applies to a customized evaluation function
logregobjattr <- function(preds, dtrain) {
  # now you can access the attribute in the customized function
  labels <- attr(dtrain, 'label')
  preds <- 1 / (1 + exp(-preds))
  grad <- preds - labels
  hess <- preds * (1 - preds)
  return(list(grad = grad, hess = hess))
}
param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0,
              objective = logregobjattr, eval_metric = evalerror)
print('start training with user customized objective, with additional attributes in DMatrix')
# training with customized objective; we can also do step-by-step training
# simply look at xgboost.py's implementation of train

@@ -1,20 +1,20 @@
require(xgboost)
# load in the agaricus dataset
-data(agaricus.train, package = 'xgboost')
-data(agaricus.test, package = 'xgboost')
+data(agaricus.train, package='xgboost')
+data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# note: for customized objective function, we leave objective as default
# note: what we are getting is margin value in prediction
# you must know what you are doing
-param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0)
+param <- list(max_depth=2, eta=1, nthread=2, verbosity=0)
watchlist <- list(eval = dtest)
num_round <- 20
# user-defined objective function: given predictions, return the gradient and second-order gradient
# this is loglikelihood loss
logregobj <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
-  preds <- 1 / (1 + exp(-preds))
+  preds <- 1/(1 + exp(-preds))
  grad <- preds - labels
  hess <- preds * (1 - preds)
  return(list(grad = grad, hess = hess))
@@ -27,7 +27,7 @@ logregobj <- function(preds, dtrain) {
# Keep this in mind when you use the customization; you may need to write a customized evaluation function
evalerror <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
-  err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
+  err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
  return(list(metric = "error", value = err))
}
print ('start training with early stopping setting')
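The hunk stops before the training call. A minimal sketch (not part of the diff) of how the early-stopping run could look, reusing param, watchlist and the custom functions defined above; argument names follow xgb.train(), and the value 3 is an arbitrary choice for illustration:

bst <- xgb.train(param, dtrain, num_round, watchlist,
                 obj = logregobj, feval = evalerror,
                 maximize = FALSE, early_stopping_rounds = 3)
# stops when the custom eval error has not improved for 3 consecutive rounds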
@@ -1,7 +1,7 @@
require(xgboost)
# load in the agaricus dataset
-data(agaricus.train, package = 'xgboost')
-data(agaricus.test, package = 'xgboost')
+data(agaricus.train, package='xgboost')
+data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
##
@@ -11,14 +11,14 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
##

# change booster to gblinear, so that we are fitting a linear model
-# alpha is the L1 regularizer
+# alpha is the L1 regularizer
# lambda is the L2 regularizer
# you can also set lambda_bias, which is the L2 regularizer on the bias term
param <- list(objective = "binary:logistic", booster = "gblinear",
              nthread = 2, alpha = 0.0001, lambda = 1)

# normally, you do not need to set eta (step_size)
-# XGBoost uses a parallel coordinate descent algorithm (shotgun),
+# XGBoost uses a parallel coordinate descent algorithm (shotgun),
# so parallelization can affect convergence in certain cases
# setting eta to a smaller value, e.g. 0.5, can make the optimization more stable
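A one-line sketch of the more conservative configuration the comment above suggests (illustrative value, not part of the diff):

param <- list(objective = "binary:logistic", booster = "gblinear",
              nthread = 2, alpha = 0.0001, lambda = 1, eta = 0.5)
# the smaller eta damps each shotgun update, trading speed for stability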
@@ -30,4 +30,5 @@ num_round <- 2
bst <- xgb.train(param, dtrain, num_round, watchlist)
ypred <- predict(bst, dtest)
labels <- getinfo(dtest, 'label')
-cat('error of preds=', mean(as.numeric(ypred > 0.5) != labels), '\n')
+cat('error of preds=', mean(as.numeric(ypred>0.5)!=labels),'\n')

@@ -1,9 +1,9 @@
# An example of using GPU-accelerated tree building algorithms
-#
-# NOTE: it can only run if you have a CUDA-enable GPU and the package was
+#
+# NOTE: it can only run if you have a CUDA-enable GPU and the package was
# specially compiled with GPU support.
#
-# For the current functionality, see
+# For the current functionality, see
# https://xgboost.readthedocs.io/en/latest/gpu/index.html
#

@@ -21,8 +21,8 @@ m <- X[, sel] %*% betas - 1 + rnorm(N)
y <- rbinom(N, 1, plogis(m))

tr <- sample.int(N, N * 0.75)
-dtrain <- xgb.DMatrix(X[tr, ], label = y[tr])
-dtest <- xgb.DMatrix(X[-tr, ], label = y[-tr])
+dtrain <- xgb.DMatrix(X[tr,], label = y[tr])
+dtest <- xgb.DMatrix(X[-tr,], label = y[-tr])
wl <- list(train = dtrain, test = dtest)

# An example of running 'gpu_hist' algorithm
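The 'gpu_hist' hunk is truncated here. A hedged sketch of such a run, assuming the dtrain/wl objects above and a GPU-enabled build (parameter names as documented for the GPU algorithms):

param <- list(objective = 'binary:logistic', eval_metric = 'auc',
              max_depth = 2, eta = 1, tree_method = 'gpu_hist')
bst <- xgb.train(param, dtrain, watchlist = wl, nrounds = 40)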
@@ -4,38 +4,33 @@ library(data.table)
set.seed(1024)

# Function to obtain a list of interactions fitted in trees, requires input of maximum depth
-treeInteractions <- function(input_tree, input_max_depth) {
-  ID_merge <- i.id <- i.feature <- NULL  # Suppress warning "no visible binding for global variable"
-
-  trees <- data.table::copy(input_tree)  # copy tree input to prevent overwriting
+treeInteractions <- function(input_tree, input_max_depth){
+  trees <- copy(input_tree)  # copy tree input to prevent overwriting
  if (input_max_depth < 2) return(list())  # no interactions if max depth < 2
  if (nrow(input_tree) == 1) return(list())

  # Attach parent nodes
-  for (i in 2:input_max_depth) {
-    if (i == 2) trees[, ID_merge := ID] else trees[, ID_merge := get(paste0('parent_', i - 2))]
-    parents_left <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = Yes)]
-    parents_right <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = No)]
+  for (i in 2:input_max_depth){
+    if (i == 2) trees[, ID_merge:=ID] else trees[, ID_merge:=get(paste0('parent_',i-2))]
+    parents_left <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=Yes)]
+    parents_right <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=No)]

-    data.table::setorderv(trees, 'ID_merge')
-    data.table::setorderv(parents_left, 'ID_merge')
-    data.table::setorderv(parents_right, 'ID_merge')
+    setorderv(trees, 'ID_merge')
+    setorderv(parents_left, 'ID_merge')
+    setorderv(parents_right, 'ID_merge')

-    trees <- merge(trees, parents_left, by = 'ID_merge', all.x = TRUE)
-    trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
-          := list(i.id, i.feature)]
-    trees[, c('i.id', 'i.feature') := NULL]
+    trees <- merge(trees, parents_left, by='ID_merge', all.x=T)
+    trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
+    trees[, c('i.id','i.feature'):=NULL]

-    trees <- merge(trees, parents_right, by = 'ID_merge', all.x = TRUE)
-    trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
-          := list(i.id, i.feature)]
-    trees[, c('i.id', 'i.feature') := NULL]
+    trees <- merge(trees, parents_right, by='ID_merge', all.x=T)
+    trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
+    trees[, c('i.id','i.feature'):=NULL]
  }

  # Extract nodes with interactions
-  interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
-                             c('Feature', paste0('parent_feat_', 1:(input_max_depth - 1))),
-                             with = FALSE]
+  interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
+                             c('Feature',paste0('parent_feat_',1:(input_max_depth-1))), with=F]
  interaction_trees_split <- split(interaction_trees, 1:nrow(interaction_trees))
  interaction_list <- lapply(interaction_trees_split, as.character)
@@ -52,62 +47,59 @@ treeInteractions <- function(input_tree, input_max_depth) {

# Generate sample data
x <- list()
-for (i in 1:10) {
-  x[[i]] <- i * rnorm(1000, 10)
+for (i in 1:10){
+  x[[i]] = i*rnorm(1000, 10)
}
x <- as.data.table(x)

-y <- -1 * x[, rowSums(.SD)] + x[['V1']] * x[['V2']] + x[['V3']] * x[['V4']] * x[['V5']]
-  + rnorm(1000, 0.001) + 3 * sin(x[['V7']])
+y = -1*x[, rowSums(.SD)] + x[['V1']]*x[['V2']] + x[['V3']]*x[['V4']]*x[['V5']] + rnorm(1000, 0.001) + 3*sin(x[['V7']])

-train <- as.matrix(x)
+train = as.matrix(x)

# Interaction constraint list (column names form)
-interaction_list <- list(c('V1', 'V2'), c('V3', 'V4', 'V5'))
+interaction_list <- list(c('V1','V2'),c('V3','V4','V5'))

# Convert interaction constraint list into feature index form
cols2ids <- function(object, col_names) {
  LUT <- seq_along(col_names) - 1
  names(LUT) <- col_names
-  rapply(object, function(x) LUT[x], classes = "character", how = "replace")
+  rapply(object, function(x) LUT[x], classes="character", how="replace")
}
-interaction_list_fid <- cols2ids(interaction_list, colnames(train))
+interaction_list_fid = cols2ids(interaction_list, colnames(train))

# Fit model with interaction constraints
-bst <- xgboost(data = train, label = y, max_depth = 4,
-               eta = 0.1, nthread = 2, nrounds = 1000,
-               interaction_constraints = interaction_list_fid)
+bst = xgboost(data = train, label = y, max_depth = 4,
+              eta = 0.1, nthread = 2, nrounds = 1000,
+              interaction_constraints = interaction_list_fid)

bst_tree <- xgb.model.dt.tree(colnames(train), bst)
-bst_interactions <- treeInteractions(bst_tree, 4)
-# interactions constrained to combinations of V1*V2 and V3*V4*V5
+bst_interactions <- treeInteractions(bst_tree, 4) # interactions constrained to combinations of V1*V2 and V3*V4*V5

# Fit model without interaction constraints
-bst2 <- xgboost(data = train, label = y, max_depth = 4,
-                eta = 0.1, nthread = 2, nrounds = 1000)
+bst2 = xgboost(data = train, label = y, max_depth = 4,
+               eta = 0.1, nthread = 2, nrounds = 1000)

bst2_tree <- xgb.model.dt.tree(colnames(train), bst2)
bst2_interactions <- treeInteractions(bst2_tree, 4) # many more interactions

# Fit model with both interaction and monotonicity constraints
-bst3 <- xgboost(data = train, label = y, max_depth = 4,
-                eta = 0.1, nthread = 2, nrounds = 1000,
-                interaction_constraints = interaction_list_fid,
-                monotone_constraints = c(-1, 0, 0, 0, 0, 0, 0, 0, 0, 0))
+bst3 = xgboost(data = train, label = y, max_depth = 4,
+               eta = 0.1, nthread = 2, nrounds = 1000,
+               interaction_constraints = interaction_list_fid,
+               monotone_constraints = c(-1,0,0,0,0,0,0,0,0,0))

bst3_tree <- xgb.model.dt.tree(colnames(train), bst3)
-bst3_interactions <- treeInteractions(bst3_tree, 4)
-# interactions still constrained to combinations of V1*V2 and V3*V4*V5
+bst3_interactions <- treeInteractions(bst3_tree, 4) # interactions still constrained to combinations of V1*V2 and V3*V4*V5

# Show monotonic constraints still apply by checking scores after incrementing V1
x1 <- sort(unique(x[['V1']]))
for (i in 1:length(x1)){
  testdata <- copy(x[, -c('V1')])
  testdata[['V1']] <- x1[i]
-  testdata <- testdata[, paste0('V', 1:10), with = FALSE]
+  testdata <- testdata[, paste0('V',1:10), with=F]
  pred <- predict(bst3, as.matrix(testdata))

  # Should not print out anything due to monotonic constraints
  if (i > 1) if (any(pred > prev_pred)) print(i)
-  prev_pred <- pred
+  prev_pred <- pred
}
@@ -1,6 +1,7 @@
data(mtcars)
head(mtcars)
-bst <- xgboost(data = as.matrix(mtcars[, -11]), label = mtcars[, 11],
-               objective = 'count:poisson', nrounds = 5)
-pred <- predict(bst, as.matrix(mtcars[, -11]))
-sqrt(mean((pred - mtcars[, 11]) ^ 2))
+bst = xgboost(data=as.matrix(mtcars[,-11]),label=mtcars[,11],
+              objective='count:poisson',nrounds=5)
+pred = predict(bst,as.matrix(mtcars[,-11]))
+sqrt(mean((pred-mtcars[,11])^2))

@@ -1,23 +1,23 @@
require(xgboost)
# load in the agaricus dataset
-data(agaricus.train, package = 'xgboost')
-data(agaricus.test, package = 'xgboost')
+data(agaricus.train, package='xgboost')
+data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)

-param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
+param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
watchlist <- list(eval = dtest, train = dtrain)
-nrounds <- 2
+nrounds = 2

# training the model for two rounds
-bst <- xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
+bst = xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
cat('start testing prediction from first n trees\n')
-labels <- getinfo(dtest, 'label')
+labels <- getinfo(dtest,'label')

### predict using first 1 tree
-ypred1 <- predict(bst, dtest, ntreelimit = 1)
+ypred1 = predict(bst, dtest, ntreelimit=1)
# by default, we predict using all the trees
-ypred2 <- predict(bst, dtest)
+ypred2 = predict(bst, dtest)

-cat('error of ypred1=', mean(as.numeric(ypred1 > 0.5) != labels), '\n')
-cat('error of ypred2=', mean(as.numeric(ypred2 > 0.5) != labels), '\n')
+cat('error of ypred1=', mean(as.numeric(ypred1>0.5)!=labels),'\n')
+cat('error of ypred2=', mean(as.numeric(ypred2>0.5)!=labels),'\n')
@@ -5,34 +5,34 @@ require(Matrix)
set.seed(1982)

# load in the agaricus dataset
-data(agaricus.train, package = 'xgboost')
-data(agaricus.test, package = 'xgboost')
+data(agaricus.train, package='xgboost')
+data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)

-param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
-nrounds <- 4
+param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
+nrounds = 4

# training the model for two rounds
-bst <- xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
+bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)

# Model accuracy without new features
-accuracy.before <- (sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label)
-                    / length(agaricus.test$label))
+accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)

# by default, we predict using all the trees
-pred_with_leaf <- predict(bst, dtest, predleaf = TRUE)

+pred_with_leaf = predict(bst, dtest, predleaf = TRUE)
head(pred_with_leaf)

create.new.tree.features <- function(model, original.features){
  pred_with_leaf <- predict(model, original.features, predleaf = TRUE)
  cols <- list()
-  for (i in 1:model$niter) {
+  for(i in 1:model$niter){
    # max is not the real max but it's not important for the purpose of adding features
-    leaf.id <- sort(unique(pred_with_leaf[, i]))
-    cols[[i]] <- factor(x = pred_with_leaf[, i], level = leaf.id)
+    leaf.id <- sort(unique(pred_with_leaf[,i]))
+    cols[[i]] <- factor(x = pred_with_leaf[,i], level = leaf.id)
  }
-  cbind(original.features, sparse.model.matrix(~ . - 1, as.data.frame(cols)))
+  cbind(original.features, sparse.model.matrix( ~ . -1, as.data.frame(cols)))
}

# Convert previous features to one hot encoding
@@ -47,9 +47,7 @@ watchlist <- list(train = new.dtrain)
bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)

# Model accuracy with new features
-accuracy.after <- (sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label)
-                   / length(agaricus.test$label))
+accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)

# Here the accuracy was already good and is now perfect.
-cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
-          accuracy.after, "!\n"))
+cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now", accuracy.after, "!\n"))
@@ -1,14 +1,14 @@
# running all scripts in demo folder
-demo(basic_walkthrough, package = 'xgboost')
-demo(custom_objective, package = 'xgboost')
-demo(boost_from_prediction, package = 'xgboost')
-demo(predict_first_ntree, package = 'xgboost')
-demo(generalized_linear_model, package = 'xgboost')
-demo(cross_validation, package = 'xgboost')
-demo(create_sparse_matrix, package = 'xgboost')
-demo(predict_leaf_indices, package = 'xgboost')
-demo(early_stopping, package = 'xgboost')
-demo(poisson_regression, package = 'xgboost')
-demo(caret_wrapper, package = 'xgboost')
-demo(tweedie_regression, package = 'xgboost')
-#demo(gpu_accelerated, package = 'xgboost') # can only run when built with GPU support
+demo(basic_walkthrough)
+demo(custom_objective)
+demo(boost_from_prediction)
+demo(predict_first_ntree)
+demo(generalized_linear_model)
+demo(cross_validation)
+demo(create_sparse_matrix)
+demo(predict_leaf_indices)
+demo(early_stopping)
+demo(poisson_regression)
+demo(caret_wrapper)
+demo(tweedie_regression)
+#demo(gpu_accelerated) # can only run when built with GPU support
R-package/demo/tweedie_regression.R (20 lines changed; Normal file → Executable file)
@@ -8,12 +8,12 @@ data(AutoClaim)
dt <- data.table(AutoClaim)

# exclude these columns from the model matrix
-exclude <- c('POLICYNO', 'PLCYDATE', 'CLM_FREQ5', 'CLM_AMT5', 'CLM_FLAG', 'IN_YY')
+exclude <- c('POLICYNO', 'PLCYDATE', 'CLM_FREQ5', 'CLM_AMT5', 'CLM_FLAG', 'IN_YY')

# retains the missing values
# NOTE: this dataset comes ready out of the box
options(na.action = 'na.pass')
-x <- sparse.model.matrix(~ . - 1, data = dt[, -exclude, with = FALSE])
+x <- sparse.model.matrix(~ . - 1, data = dt[, -exclude, with = F])
options(na.action = 'na.omit')

# response
@@ -21,29 +21,29 @@ y <- dt[, CLM_AMT5]

d_train <- xgb.DMatrix(data = x, label = y, missing = NA)

-# the tweedie_variance_power parameter determines the shape of
+# the tweedie_variance_power parameter determines the shape of
# the distribution
# - closer to 1 is more poisson like and the mass
-#   is more concentrated near zero
-# - closer to 2 is more gamma like and the mass spreads to the
+#   is more concentrated near zero
+# - closer to 2 is more gamma like and the mass spreads to the
#   right with less concentration near zero

params <- list(
  objective = 'reg:tweedie',
-  eval_metric = 'rmse',
+  eval_metric = 'rmse',
  tweedie_variance_power = 1.4,
  max_depth = 6,
  eta = 1)

bst <- xgb.train(
-  data = d_train,
-  params = params,
+  data = d_train,
+  params = params,
  maximize = FALSE,
-  watchlist = list(train = d_train),
+  watchlist = list(train = d_train),
  nrounds = 20)

var_imp <- xgb.importance(attr(x, 'Dimnames')[[2]], model = bst)

preds <- predict(bst, d_train)

-rmse <- sqrt(sum(mean((y - preds) ^ 2)))
+rmse <- sqrt(sum(mean((y - preds)^2)))
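As a hedged illustration of the tweedie_variance_power comment above (the values 1.1 and 1.9 are arbitrary picks inside the open interval between the Poisson and gamma limits; not part of the diff):

params_poisson_like <- modifyList(params, list(tweedie_variance_power = 1.1))  # mass near zero
params_gamma_like <- modifyList(params, list(tweedie_variance_power = 1.9))    # mass spread right
# both remain valid 'reg:tweedie' configurations; retrain with xgb.train() to compare the fits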
@@ -1,96 +0,0 @@
# [description]
#     Create a definition file (.def) from a .dll file, using objdump. This
#     is used by FindLibR.cmake when building the R package with MSVC.
#
# [usage]
#
#     Rscript make-r-def.R something.dll something.def
#
# [references]
#     * https://www.cs.colorado.edu/~main/cs1300/doc/mingwfaq.html

args <- commandArgs(trailingOnly = TRUE)

IN_DLL_FILE <- args[[1L]]
OUT_DEF_FILE <- args[[2L]]
DLL_BASE_NAME <- basename(IN_DLL_FILE)

message(sprintf("Creating '%s' from '%s'", OUT_DEF_FILE, IN_DLL_FILE))

# system() will not raise an R exception if the process called
# fails. Wrapping it here to get that behavior.
#
# system() introduces a lot of overhead, at least on Windows,
# so trying processx if it is available
.pipe_shell_command_to_stdout <- function(command, args, out_file) {
  has_processx <- suppressMessages({
    suppressWarnings({
      require("processx")  # nolint
    })
  })
  if (has_processx) {
    p <- processx::process$new(
      command = command
      , args = args
      , stdout = out_file
      , windows_verbatim_args = FALSE
    )
    invisible(p$wait())
  } else {
    message(paste0(
      "Using system2() to run shell commands. Installing "
      , "'processx' with install.packages('processx') might "
      , "make this faster."
    ))
    exit_code <- system2(
      command = command
      , args = shQuote(args)
      , stdout = out_file
    )
    if (exit_code != 0L) {
      stop(paste0("Command failed with exit code: ", exit_code))
    }
  }
  return(invisible(NULL))
}

# use objdump to dump all the symbols
OBJDUMP_FILE <- "objdump-out.txt"
.pipe_shell_command_to_stdout(
  command = "objdump"
  , args = c("-p", IN_DLL_FILE)
  , out_file = OBJDUMP_FILE
)

objdump_results <- readLines(OBJDUMP_FILE)
result <- file.remove(OBJDUMP_FILE)

# Only one table in the objdump results matters for our purposes,
# see https://www.cs.colorado.edu/~main/cs1300/doc/mingwfaq.html
start_index <- which(
  grepl(
    pattern = "[Ordinal/Name Pointer] Table"
    , x = objdump_results
    , fixed = TRUE
  )
)
empty_lines <- which(objdump_results == "")
end_of_table <- empty_lines[empty_lines > start_index][1L]

# Read the contents of the table
exported_symbols <- objdump_results[(start_index + 1L):end_of_table]
exported_symbols <- gsub("\t", "", exported_symbols)
exported_symbols <- gsub(".*\\] ", "", exported_symbols)
exported_symbols <- gsub(" ", "", exported_symbols)

# Write R.def file
writeLines(
  text = c(
    paste0("LIBRARY \"", DLL_BASE_NAME, "\"")
    , "EXPORTS"
    , exported_symbols
  )
  , con = OUT_DEF_FILE
  , sep = "\n"
)
message(sprintf("Successfully created '%s'", OUT_DEF_FILE))
@@ -1,62 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{a-compatibility-note-for-saveRDS-save}
\alias{a-compatibility-note-for-saveRDS-save}
\title{Do not use \code{\link[base]{saveRDS}} or \code{\link[base]{save}} for long-term archival of
models. Instead, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}}.}
\description{
It is a common practice to use the built-in \code{\link[base]{saveRDS}} function (or
\code{\link[base]{save}}) to persist R objects to the disk. While it is possible to persist
\code{xgb.Booster} objects using \code{\link[base]{saveRDS}}, it is not advisable to do so if
the model is to be accessed in the future. If you train a model with the current version of
XGBoost and persist it with \code{\link[base]{saveRDS}}, the model is not guaranteed to be
accessible in later releases of XGBoost. To ensure that your model can be accessed in future
releases of XGBoost, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}} instead.
}
\details{
Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into
the JSON format by specifying the JSON extension. To read the model back, use
\code{\link{xgb.load}}.

Use \code{\link{xgb.save.raw}} to save the XGBoost model as a sequence (vector) of raw bytes
in a future-proof manner. Future releases of XGBoost will be able to read the raw bytes and
re-construct the corresponding model. To read the model back, use \code{\link{xgb.load.raw}}.
The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model
as part of another R object.

Note: Do not use \code{\link{xgb.serialize}} to store models long-term. It persists not only the
model but also internal configurations and parameters, and its format is not stable across
multiple XGBoost versions. Use \code{\link{xgb.serialize}} only for checkpointing.

For more details and explanation about model persistence and archival, consult the page
\url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}.
}
\examples{
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")

# Save as a stand-alone file; load it with xgb.load()
xgb.save(bst, 'xgb.model')
bst2 <- xgb.load('xgb.model')

# Save as a stand-alone file (JSON); load it with xgb.load()
xgb.save(bst, 'xgb.model.json')
bst2 <- xgb.load('xgb.model.json')

# Save as a raw byte vector; load it with xgb.load.raw()
xgb_bytes <- xgb.save.raw(bst)
bst2 <- xgb.load.raw(xgb_bytes)

# Persist XGBoost model as part of another R object
obj <- list(xgb_model_bytes = xgb.save.raw(bst), description = "My first XGBoost model")
# Persist the R object. Here, saveRDS() is okay, since it doesn't persist
# xgb.Booster directly. What's being persisted is the future-proof byte representation
# as given by xgb.save.raw().
saveRDS(obj, 'my_object.rds')
# Read back the R object
obj2 <- readRDS('my_object.rds')
# Re-construct xgb.Booster object from the bytes
bst2 <- xgb.load.raw(obj2$xgb_model_bytes)

}
@@ -4,10 +4,8 @@
\name{agaricus.test}
\alias{agaricus.test}
\title{Test part from Mushroom Data Set}
-\format{
-A list containing a label vector, and a dgCMatrix object with 1611
-rows and 126 variables
-}
+\format{A list containing a label vector, and a dgCMatrix object with 1611
+rows and 126 variables}
\usage{
data(agaricus.test)
}
@@ -4,10 +4,8 @@
\name{agaricus.train}
\alias{agaricus.train}
\title{Training part from Mushroom Data Set}
-\format{
-A list containing a label vector, and a dgCMatrix object with 6513
-rows and 127 variables
-}
+\format{A list containing a label vector, and a dgCMatrix object with 6513
+rows and 127 variables}
\usage{
data(agaricus.train)
}
@@ -38,8 +38,6 @@ bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_dep
               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
saveRDS(bst, "xgb.model.rds")

-# Warning: The resulting RDS file is only compatible with the current XGBoost version.
-# Refer to the section titled "a-compatibility-note-for-saveRDS-save".
bst1 <- readRDS("xgb.model.rds")
if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
# the handle is invalid:
@@ -55,7 +55,7 @@ than for \code{xgb.Booster}, since only just a handle (pointer) would need to be
That would only matter if attributes need to be set many times.
Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
-and it would be user's responsibility to call \code{xgb.serialize} to update it.
+and it would be user's responsibility to call \code{xgb.save.raw} to update it.

The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
but it doesn't delete the other existing attributes.
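A short usage sketch of the setters documented above, assuming a trained booster bst as in the package examples (not part of the diff):

xgb.attr(bst, "my_tag") <- "v1"                               # set a single attribute
xgb.attributes(bst) <- list(owner = "demo", stage = "draft")  # set several at once
xgb.attr(bst, "my_tag")                                       # still "v1": other attributes are kept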
@@ -1,28 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{xgb.config}
\alias{xgb.config}
\alias{xgb.config<-}
\title{Accessors for model parameters as JSON string.}
\usage{
xgb.config(object)

xgb.config(object) <- value
}
\arguments{
\item{object}{Object of class \code{xgb.Booster}}

\item{value}{A JSON string.}
}
\description{
Accessors for model parameters as JSON string.
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train

bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
config <- xgb.config(bst)

}
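A sketch of the round trip through the accessor documented above; the jsonlite dependency and the edited field path are assumptions of this example, not of the package:

cfg <- jsonlite::fromJSON(xgb.config(bst))   # JSON string -> R list
cfg$learner$generic_param$verbosity <- "0"   # hypothetical field path
xgb.config(bst) <- jsonlite::toJSON(cfg, auto_unbox = TRUE)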
@@ -24,9 +24,9 @@ This is the function inspired from the paragraph 3.1 of the paper:

\strong{Practical Lessons from Predicting Clicks on Ads at Facebook}

-\emph{(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yan, xin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers,
+\emph{(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yan, xin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers,
Joaquin Quinonero Candela)}

International Workshop on Data Mining for Online Advertising (ADKDD) - August 24, 2014

\url{https://research.fb.com/publications/practical-lessons-from-predicting-clicks-on-ads-at-facebook/}.
@@ -37,10 +37,10 @@ Extract explaining the method:
convenient way to implement non-linear and tuple transformations
of the kind we just described. We treat each individual
tree as a categorical feature that takes as value the
-index of the leaf an instance ends up falling in. We use
-1-of-K coding of this type of features.
+index of the leaf an instance ends up falling in. We use
+1-of-K coding of this type of features.

-For example, consider the boosted tree model in Figure 1 with 2 subtrees,
+For example, consider the boosted tree model in Figure 1 with 2 subtrees,
where the first subtree has 3 leafs and the second 2 leafs. If an
instance ends up in leaf 2 in the first subtree and leaf 1 in
second subtree, the overall input to the linear classifier will
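A worked instance (not from the paper) of the 1-of-K coding in the extract above: with subtrees of 3 and 2 leaves, landing in leaf 2 of the first and leaf 1 of the second encodes as

c(tabulate(2, nbins = 3), tabulate(1, nbins = 2))  # 0 1 0 1 0
# i.e. the concatenated one-hot blocks [0, 1, 0] and [1, 0]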
@@ -28,15 +28,12 @@ xgb.cv(
)
}
\arguments{
-\item{params}{the list of parameters. The complete list of parameters is
-available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
-is a shorter summary:
+\item{params}{the list of parameters. Commonly used ones are:
\itemize{
\item \code{objective} objective function, common ones are
\itemize{
-\item \code{reg:squarederror} Regression with squared loss.
-\item \code{binary:logistic} logistic regression for classification.
-\item See \code{\link[=xgb.train]{xgb.train}()} for complete list of objectives.
+\item \code{reg:squarederror} Regression with squared loss
+\item \code{binary:logistic} logistic regression for classification
}
\item \code{eta} step size of each boosting step
\item \code{max_depth} maximum depth of the tree
@@ -138,7 +135,7 @@ An object of class \code{xgb.cv.synchronous} with the following elements:
(only available with early stopping).
\item \code{pred} CV prediction values available when \code{prediction} is set.
It is either vector or matrix (see \code{\link{cb.cv.predict}}).
-\item \code{models} a list of the CV folds' models. It is only available with the explicit
+\item \code{models} a liost of the CV folds' models. It is only available with the explicit
setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
}
}

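A minimal usage sketch of the interface documented above, on the agaricus data used elsewhere in the package (not part of the diff):

data(agaricus.train, package = 'xgboost')
cv <- xgb.cv(params = list(objective = "binary:logistic", max_depth = 2, eta = 1),
             data = agaricus.train$data, label = agaricus.train$label,
             nrounds = 10, nfold = 5, metrics = "error", prediction = TRUE)
# cv$evaluation_log holds the per-round train/test means; cv$pred the CV predictions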
@@ -16,14 +16,14 @@ xgb.dump(
\arguments{
\item{model}{the model object.}

-\item{fname}{the name of the text file where to save the model text dump.
+\item{fname}{the name of the text file where to save the model text dump.
If not provided or set to \code{NULL}, the model is returned as a \code{character} vector.}

\item{fmap}{feature map file representing feature types.
-Detailed description could be found at
+Detailed description could be found at
\url{https://github.com/dmlc/xgboost/wiki/Binary-Classification#dump-model}.
See demo/ for walkthrough example in R, and
-\url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt}
+\url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt}
for an example of the format.}

\item{with_stats}{whether to dump some additional statistics about the splits.
@@ -47,7 +47,7 @@ data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
-bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
# save the model in file 'xgb.model.dump'
dump_path = file.path(tempdir(), 'model.dump')
@@ -22,7 +22,7 @@ Non-null \code{feature_names} could be provided to override those in the model.}

\item{trees}{(only for the gbtree booster) an integer vector of tree indices that should be included
into the importance calculation. If set to \code{NULL}, all trees of the model are parsed.
-It could be useful, e.g., in multiclass classification to get feature importances
+It could be useful, e.g., in multiclass classification to get feature importances
for each class separately. IMPORTANT: the tree index in xgboost models
is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).}

@@ -37,7 +37,7 @@ For a tree model, a \code{data.table} with the following columns:
\itemize{
\item \code{Features} names of the features used in the model;
\item \code{Gain} represents fractional contribution of each feature to the model based on
-the total gain of this feature's splits. Higher percentage means a more important
+the total gain of this feature's splits. Higher percentage means a more important
predictive feature.
\item \code{Cover} metric of the number of observation related to this feature;
\item \code{Frequency} percentage representing the relative number of times
@@ -51,7 +51,7 @@ A linear model's importance \code{data.table} has the following columns:
\item \code{Class} (only for multiclass models) class label.
}

-If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names},
+If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names},
index of the features will be used instead. Because the index is extracted from the model dump
(based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R).
}
@@ -61,21 +61,21 @@ Creates a \code{data.table} of feature importances in a model.
\details{
This function works for both linear and tree models.

-For linear models, the importance is the absolute magnitude of linear coefficients.
-For that reason, in order to obtain a meaningful ranking by importance for a linear model,
-the features need to be on the same scale (which you also would want to do when using either
+For linear models, the importance is the absolute magnitude of linear coefficients.
+For that reason, in order to obtain a meaningful ranking by importance for a linear model,
+the features need to be on the same scale (which you also would want to do when using either
L1 or L2 regularization).
}
\examples{

# binomial classification using gbtree:
data(agaricus.train, package='xgboost')
-bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
+bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
xgb.importance(model = bst)

# binomial classification using gblinear:
-bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, booster = "gblinear",
+bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, booster = "gblinear",
               eta = 0.3, nthread = 1, nrounds = 20, objective = "binary:logistic")
xgb.importance(model = bst)

@@ -17,8 +17,8 @@ Load xgboost model from the binary model file.
}
\details{
The input file is expected to contain a model saved in an xgboost-internal binary format
-using either \code{\link{xgb.save}} or \code{\link{cb.save.model}} in R, or using some
-appropriate methods from other xgboost interfaces. E.g., a model trained in Python and
+using either \code{\link{xgb.save}} or \code{\link{cb.save.model}} in R, or using some
+appropriate methods from other xgboost interfaces. E.g., a model trained in Python and
saved from there in xgboost format, could be loaded from R.

Note: a model saved as an R-object, has to be loaded using corresponding R-methods,
@@ -29,7 +29,7 @@ data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
-bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
xgb.save(bst, 'xgb.model')
bst <- xgb.load('xgb.model')
@@ -1,14 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.load.raw.R
\name{xgb.load.raw}
\alias{xgb.load.raw}
\title{Load serialised xgboost model from R's raw vector}
\usage{
xgb.load.raw(buffer)
}
\arguments{
\item{buffer}{the buffer returned by xgb.save.raw}
}
\description{
User can generate raw memory buffer by calling xgb.save.raw
}
@@ -20,7 +20,7 @@ Non-null \code{feature_names} could be provided to override those in the model.}

\item{model}{object of class \code{xgb.Booster}}

-\item{text}{\code{character} vector previously generated by the \code{xgb.dump}
+\item{text}{\code{character} vector previously generated by the \code{xgb.dump}
function (where parameter \code{with_stats = TRUE} should have been set).
\code{text} takes precedence over \code{model}.}

@@ -53,10 +53,10 @@ The columns of the \code{data.table} are:
\item \code{Quality}: either the split gain (change in loss) or the leaf value
\item \code{Cover}: metric related to the number of observation either seen by a split
or collected by a leaf during training.
}
}

When \code{use_int_id=FALSE}, columns "Yes", "No", and "Missing" point to model-wide node identifiers
-in the "ID" column. When \code{use_int_id=TRUE}, those columns point to node identifiers from
+in the "ID" column. When \code{use_int_id=TRUE}, those columns point to node identifiers from
the corresponding trees in the "Node" column.
}
\description{
@@ -67,17 +67,17 @@ Parse a boosted tree model text dump into a \code{data.table} structure.

data(agaricus.train, package='xgboost')

-bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
+bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")

(dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst))

-# This bst model already has feature_names stored with it, so those would be used when
+# This bst model already has feature_names stored with it, so those would be used when
# feature_names is not set:
(dt <- xgb.model.dt.tree(model = bst))

# How to match feature names of splits that are following a current 'Yes' branch:

merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)]

}
@@ -23,7 +23,7 @@ or a data.table result of the \code{xgb.model.dt.tree} function.}

\item{which}{which distribution to plot (see details).}

-\item{plot}{(base R barplot) whether a barplot should be produced.
+\item{plot}{(base R barplot) whether a barplot should be produced.
If FALSE, only a data.table is returned.}

\item{...}{other parameters passed to \code{barplot} or \code{plot}.}
@@ -45,10 +45,10 @@ When \code{which="2x1"}, two distributions with respect to the leaf depth
are plotted on top of each other:
\itemize{
\item the distribution of the number of leafs in a tree model at a certain depth;
-\item the distribution of average weighted number of observations ("cover")
+\item the distribution of average weighted number of observations ("cover")
ending up in leafs at certain depth.
}
-Those could be helpful in determining sensible ranges of the \code{max_depth}
+Those could be helpful in determining sensible ranges of the \code{max_depth}
and \code{min_child_weight} parameters.

When \code{which="max.depth"} or \code{which="med.depth"}, plots of either maximum or median depth
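A usage sketch of the plots described above, assuming a trained gbtree booster bst (not part of the diff):

xgb.plot.deepness(model = bst, which = "2x1")  # both depth distributions, stacked
dt_depth <- xgb.plot.deepness(model = bst, which = "max.depth", plot = FALSE)  # data.table only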
@@ -60,7 +60,7 @@ The content of each node is organised that way:
\item \code{Gain} (for split nodes): the information gain metric of a split
(corresponds to the importance of the node in the model).
\item \code{Value} (for leafs): the margin value that the leaf may contribute to prediction.
}
}
The tree root nodes also indicate the Tree index (0-based).

The "Yes" branches are marked by the "< split_value" label.
@@ -80,7 +80,7 @@ xgb.plot.tree(model = bst)
xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE)

\dontrun{
-# Below is an example of how to save this plot to a file.
+# Below is an example of how to save this plot to a file.
# Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed.
library(DiagrammeR)
gr <- xgb.plot.tree(model=bst, trees=0:1, render=FALSE)
@@ -15,25 +15,21 @@ xgb.save(model, fname)
Save xgboost model to a file in binary format.
}
\details{
-This methods allows to save a model in an xgboost-internal binary format which is universal
+This methods allows to save a model in an xgboost-internal binary format which is universal
among the various xgboost interfaces. In R, the saved model file could be read-in later
-using either the \code{\link{xgb.load}} function or the \code{xgb_model} parameter
+using either the \code{\link{xgb.load}} function or the \code{xgb_model} parameter
of \code{\link{xgb.train}}.

-Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{readRDS}}
-or \code{\link[base]{save}}). However, it would then only be compatible with R, and
-corresponding R-methods would need to be used to load it. Moreover, persisting the model with
-\code{\link[base]{readRDS}} or \code{\link[base]{save}}) will cause compatibility problems in
-future versions of XGBoost. Consult \code{\link{a-compatibility-note-for-saveRDS-save}} to learn
-how to persist models in a future-proof way, i.e. to make the model accessible in future
-releases of XGBoost.
+Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{readRDS}}
+or \code{\link[base]{save}}). However, it would then only be compatible with R, and
+corresponding R-methods would need to be used to load it.
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
-bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
xgb.save(bst, 'xgb.model')
bst <- xgb.load('xgb.model')
@@ -3,7 +3,7 @@
\name{xgb.save.raw}
\alias{xgb.save.raw}
\title{Save xgboost model to R's raw vector,
-user can call xgb.load.raw to load the model back from raw vector}
+user can call xgb.load to load the model back from raw vector}
\usage{
xgb.save.raw(model)
}
@@ -18,10 +18,10 @@ data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
-bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
raw <- xgb.save.raw(bst)
-bst <- xgb.load.raw(raw)
+bst <- xgb.load(raw)
pred <- predict(bst, test$data)

}
@@ -1,29 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.serialize.R
\name{xgb.serialize}
\alias{xgb.serialize}
\title{Serialize the booster instance into R's raw vector. The serialization method differs
from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
parameters. This serialization format is not stable across different xgboost versions.}
\usage{
xgb.serialize(booster)
}
\arguments{
\item{booster}{the booster instance}
}
\description{
Serialize the booster instance into R's raw vector. The serialization method differs
from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
parameters. This serialization format is not stable across different xgboost versions.
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
raw <- xgb.serialize(bst)
bst <- xgb.unserialize(raw)

}
@@ -42,9 +42,9 @@ xgboost(
)
}
\arguments{
-\item{params}{the list of parameters. The complete list of parameters is
-available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
-is a shorter summary:
+\item{params}{the list of parameters.
+The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
+Below is a shorter summary:

1. General Parameters

@@ -82,23 +82,13 @@ xgboost(
\item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
\itemize{
\item \code{reg:squarederror} Regression with squared loss (Default).
\item \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. All inputs are required to be greater than -1. Also, see metric rmsle for possible issue with this objective.
\item \code{reg:logistic} logistic regression.
\item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
\item \code{binary:logistic} logistic regression for binary classification. Output probability.
\item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
\item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
\item \code{count:poisson}: poisson regression for count data, output mean of poisson distribution. \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).
\item \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function \code{h(t) = h0(t) * HR)}.
\item \code{survival:aft}: Accelerated failure time model for censored survival time data. See \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} for details.
\item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and \code{aft-nloglik} metric.
\item \code{num_class} set the number of classes. To use only with multiclass objectives.
\item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
\item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
\item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
\item \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.
\item \code{rank:map}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} is maximized.
\item \code{reg:gamma}: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.
\item \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.
}
\item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
\item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective(rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.

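A compact sketch of the multi:softprob objective listed above, on the iris data (assumed example data, not from the diff):

data(iris)
x <- as.matrix(iris[, -5])
y <- as.integer(iris$Species) - 1   # class labels must run from 0 to num_class - 1
bst <- xgboost(data = x, label = y, max_depth = 3, eta = 0.3, nrounds = 10,
               objective = "multi:softprob", num_class = 3)
probs <- matrix(predict(bst, x), ncol = 3, byrow = TRUE)  # ndata x nclass probabilities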
@@ -1,14 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.unserialize.R
\name{xgb.unserialize}
\alias{xgb.unserialize}
\title{Load the instance back from \code{\link{xgb.serialize}}}
\usage{
xgb.unserialize(buffer)
}
\arguments{
\item{buffer}{the buffer containing booster instance saved by \code{\link{xgb.serialize}}}
}
\description{
Load the instance back from \code{\link{xgb.serialize}}
}
@@ -3,7 +3,7 @@ PKGROOT=../../
ENABLE_STD_THREAD=1
# _*_ mode: Makefile; _*_

-CXX_STD = CXX14
+CXX_STD = CXX11

XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
	-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\

@@ -15,7 +15,7 @@ xgblib:
	cp -r ../../include .
	cp -r ../../amalgamation .

-CXX_STD = CXX14
+CXX_STD = CXX11

XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
	-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\

@@ -23,10 +23,6 @@ extern SEXP XGBoosterGetAttrNames_R(SEXP);
extern SEXP XGBoosterGetAttr_R(SEXP, SEXP);
extern SEXP XGBoosterLoadModelFromRaw_R(SEXP, SEXP);
extern SEXP XGBoosterLoadModel_R(SEXP, SEXP);
-extern SEXP XGBoosterSaveJsonConfig_R(SEXP handle);
-extern SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value);
-extern SEXP XGBoosterSerializeToBuffer_R(SEXP handle);
-extern SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw);
extern SEXP XGBoosterModelToRaw_R(SEXP);
extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterSaveModel_R(SEXP, SEXP);

@@ -53,10 +49,6 @@ static const R_CallMethodDef CallEntries[] = {
  {"XGBoosterGetAttr_R", (DL_FUNC) &XGBoosterGetAttr_R, 2},
  {"XGBoosterLoadModelFromRaw_R", (DL_FUNC) &XGBoosterLoadModelFromRaw_R, 2},
  {"XGBoosterLoadModel_R", (DL_FUNC) &XGBoosterLoadModel_R, 2},
-  {"XGBoosterSaveJsonConfig_R", (DL_FUNC) &XGBoosterSaveJsonConfig_R, 1},
-  {"XGBoosterLoadJsonConfig_R", (DL_FUNC) &XGBoosterLoadJsonConfig_R, 2},
-  {"XGBoosterSerializeToBuffer_R", (DL_FUNC) &XGBoosterSerializeToBuffer_R, 1},
-  {"XGBoosterUnserializeFromBuffer_R", (DL_FUNC) &XGBoosterUnserializeFromBuffer_R, 2},
  {"XGBoosterModelToRaw_R", (DL_FUNC) &XGBoosterModelToRaw_R, 1},
  {"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 5},
  {"XGBoosterSaveModel_R", (DL_FUNC) &XGBoosterSaveModel_R, 2},

@@ -338,6 +338,15 @@ SEXP XGBoosterSaveModel_R(SEXP handle, SEXP fname) {
  return R_NilValue;
}

+SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) {
+  R_API_BEGIN();
+  CHECK_CALL(XGBoosterLoadModelFromBuffer(R_ExternalPtrAddr(handle),
+                                          RAW(raw),
+                                          length(raw)));
+  R_API_END();
+  return R_NilValue;
+}
+
SEXP XGBoosterModelToRaw_R(SEXP handle) {
  SEXP ret;
  R_API_BEGIN();
@@ -353,57 +362,6 @@ SEXP XGBoosterModelToRaw_R(SEXP handle) {
  return ret;
}

-SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) {
-  R_API_BEGIN();
-  CHECK_CALL(XGBoosterLoadModelFromBuffer(R_ExternalPtrAddr(handle),
-                                          RAW(raw),
-                                          length(raw)));
-  R_API_END();
-  return R_NilValue;
-}
-
-SEXP XGBoosterSaveJsonConfig_R(SEXP handle) {
-  const char* ret;
-  R_API_BEGIN();
-  bst_ulong len {0};
-  CHECK_CALL(XGBoosterSaveJsonConfig(R_ExternalPtrAddr(handle),
-                                     &len,
-                                     &ret));
-  R_API_END();
-  return mkString(ret);
-}
-
-SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value) {
-  R_API_BEGIN();
-  CHECK_CALL(XGBoosterLoadJsonConfig(R_ExternalPtrAddr(handle), CHAR(asChar(value))));
-  R_API_END();
-  return R_NilValue;
-}
-
-SEXP XGBoosterSerializeToBuffer_R(SEXP handle) {
-  SEXP ret;
-  R_API_BEGIN();
-  bst_ulong out_len;
-  const char *raw;
-  CHECK_CALL(XGBoosterSerializeToBuffer(R_ExternalPtrAddr(handle), &out_len, &raw));
-  ret = PROTECT(allocVector(RAWSXP, out_len));
-  if (out_len != 0) {
-    memcpy(RAW(ret), raw, out_len);
-  }
-  R_API_END();
-  UNPROTECT(1);
-  return ret;
-}
-
-SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw) {
-  R_API_BEGIN();
-  CHECK_CALL(XGBoosterUnserializeFromBuffer(R_ExternalPtrAddr(handle),
-                                            RAW(raw),
-                                            length(raw)));
-  R_API_END();
-  return R_NilValue;
-}
-
SEXP XGBoosterDumpModel_R(SEXP handle, SEXP fmap, SEXP with_stats, SEXP dump_format) {
  SEXP out;
  R_API_BEGIN();

@@ -179,39 +179,9 @@ XGB_DLL SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw);
 * \brief save model into R's raw array
 * \param handle handle
 * \return raw array
- */
+ */
XGB_DLL SEXP XGBoosterModelToRaw_R(SEXP handle);

-/*!
- * \brief Save internal parameters as a JSON string
- * \param handle handle
- * \return JSON string
- */
-XGB_DLL SEXP XGBoosterSaveJsonConfig_R(SEXP handle);
-/*!
- * \brief Load the JSON string returnd by XGBoosterSaveJsonConfig_R
- * \param handle handle
- * \param value JSON string
- * \return R_NilValue
- */
-XGB_DLL SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value);
-
-/*!
- * \brief Memory snapshot based serialization method. Saves everything states
- *   into buffer.
- * \param handle handle to booster
- */
-XGB_DLL SEXP XGBoosterSerializeToBuffer_R(SEXP handle);
-
-/*!
- * \brief Memory snapshot based serialization method. Loads the buffer returned
- *   from `XGBoosterSerializeToBuffer'.
- * \param handle handle to booster
- * \return raw byte array
- */
-XGB_DLL SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw);
-
/*!
 * \brief dump model into a string
 * \param handle handle

@@ -1,94 +0,0 @@
# Script to generate reference models. The reference models are used to test backward compatibility
# of saved model files from XGBoost version 0.90 and 1.0.x.
library(xgboost)
library(Matrix)
source('./generate_models_params.R')

set.seed(0)
metadata <- model_generator_metadata()
X <- Matrix(data = rnorm(metadata$kRows * metadata$kCols), nrow = metadata$kRows,
            ncol = metadata$kCols, sparse = TRUE)
w <- runif(metadata$kRows)

version <- packageVersion('xgboost')
target_dir <- 'models'

save_booster <- function (booster, model_name) {
  booster_bin <- function (model_name) {
    return (file.path(target_dir, paste('xgboost-', version, '.', model_name, '.bin', sep = '')))
  }
  booster_json <- function (model_name) {
    return (file.path(target_dir, paste('xgboost-', version, '.', model_name, '.json', sep = '')))
  }
  booster_rds <- function (model_name) {
    return (file.path(target_dir, paste('xgboost-', version, '.', model_name, '.rds', sep = '')))
  }
  xgb.save(booster, booster_bin(model_name))
  saveRDS(booster, booster_rds(model_name))
  if (version >= '1.0.0') {
    xgb.save(booster, booster_json(model_name))
  }
}

generate_regression_model <- function () {
  print('Regression')
  y <- rnorm(metadata$kRows)

  data <- xgb.DMatrix(X, label = y)
  params <- list(tree_method = 'hist', num_parallel_tree = metadata$kForests,
                 max_depth = metadata$kMaxDepth)
  booster <- xgb.train(params, data, nrounds = metadata$kRounds)
  save_booster(booster, 'reg')
}

generate_logistic_model <- function () {
  print('Binary classification with logistic loss')
  y <- sample(0:1, size = metadata$kRows, replace = TRUE)
  stopifnot(max(y) == 1, min(y) == 0)

  data <- xgb.DMatrix(X, label = y, weight = w)
  params <- list(tree_method = 'hist', num_parallel_tree = metadata$kForests,
                 max_depth = metadata$kMaxDepth, objective = 'binary:logistic')
  booster <- xgb.train(params, data, nrounds = metadata$kRounds)
  save_booster(booster, 'logit')
}

generate_classification_model <- function () {
  print('Multi-class classification')
  y <- sample(0:(metadata$kClasses - 1), size = metadata$kRows, replace = TRUE)
  stopifnot(max(y) == metadata$kClasses - 1, min(y) == 0)

  data <- xgb.DMatrix(X, label = y, weight = w)
  params <- list(num_class = metadata$kClasses, tree_method = 'hist',
                 num_parallel_tree = metadata$kForests, max_depth = metadata$kMaxDepth,
                 objective = 'multi:softmax')
  booster <- xgb.train(params, data, nrounds = metadata$kRounds)
  save_booster(booster, 'cls')
}

generate_ranking_model <- function () {
  print('Learning to rank')
  y <- sample(0:4, size = metadata$kRows, replace = TRUE)
  stopifnot(max(y) == 4, min(y) == 0)
  kGroups <- 20
  w <- runif(kGroups)
  g <- rep(50, times = kGroups)

  data <- xgb.DMatrix(X, label = y, group = g)
  # setinfo(data, 'weight', w)
  # ^^^ does not work in version <= 1.1.0; see https://github.com/dmlc/xgboost/issues/5942
  # So call low-level function XGDMatrixSetInfo_R directly. Since this function is not an exported
  # symbol, use the triple-colon operator.
  .Call(xgboost:::XGDMatrixSetInfo_R, data, 'weight', as.numeric(w))
  params <- list(objective = 'rank:ndcg', num_parallel_tree = metadata$kForests,
                 tree_method = 'hist', max_depth = metadata$kMaxDepth)
  booster <- xgb.train(params, data, nrounds = metadata$kRounds)
  save_booster(booster, 'ltr')
}

dir.create(target_dir)

invisible(generate_regression_model())
invisible(generate_logistic_model())
invisible(generate_classification_model())
invisible(generate_ranking_model())

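The script writes three files per model into target_dir, following the pattern 'xgboost-<version>.<name>.{bin,json,rds}'. A sketch of how a backward-compatibility test might read one of them back (the file name below is illustrative; it assumes the script was run under XGBoost 1.0.0):

library(xgboost)
# Hypothetical file produced by save_booster() above.
model_file <- file.path('models', 'xgboost-1.0.0.reg.bin')
booster <- xgb.load(model_file)
# A loaded reference model should still predict on data of the original shape.
pred <- predict(booster, X)
stopifnot(length(pred) == metadata$kRows)
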
@@ -1,10 +0,0 @@
model_generator_metadata <- function() {
  return (list(
    kRounds = 2,
    kRows = 1000,
    kCols = 4,
    kForests = 2,
    kMaxDepth = 2,
    kClasses = 3
  ))
}
@@ -1,71 +0,0 @@
library(lintr)
library(crayon)

my_linters <- list(
  absolute_path_linter = lintr::absolute_path_linter,
  assignment_linter = lintr::assignment_linter,
  closed_curly_linter = lintr::closed_curly_linter,
  commas_linter = lintr::commas_linter,
  # commented_code_linter = lintr::commented_code_linter,
  infix_spaces_linter = lintr::infix_spaces_linter,
  line_length_linter = lintr::line_length_linter,
  no_tab_linter = lintr::no_tab_linter,
  object_usage_linter = lintr::object_usage_linter,
  # snake_case_linter = lintr::snake_case_linter,
  # multiple_dots_linter = lintr::multiple_dots_linter,
  object_length_linter = lintr::object_length_linter,
  open_curly_linter = lintr::open_curly_linter,
  # single_quotes_linter = lintr::single_quotes_linter,
  spaces_inside_linter = lintr::spaces_inside_linter,
  spaces_left_parentheses_linter = lintr::spaces_left_parentheses_linter,
  trailing_blank_lines_linter = lintr::trailing_blank_lines_linter,
  trailing_whitespace_linter = lintr::trailing_whitespace_linter,
  true_false = lintr::T_and_F_symbol_linter
)

results <- lapply(
  list.files(path = '.', pattern = '\\.[Rr]$', recursive = TRUE),
  function (r_file) {
    cat(sprintf("Processing %s ...\n", r_file))
    list(r_file = r_file,
         output = lintr::lint(filename = r_file, linters = my_linters))
  })
num_issue <- Reduce(sum, lapply(results, function (e) length(e$output)))

lint2str <- function(lint_entry) {
  color <- function(type) {
    switch(type,
      "warning" = crayon::magenta,
      "error" = crayon::red,
      "style" = crayon::blue,
      crayon::bold
    )
  }

  paste0(
    lapply(lint_entry$output,
           function (lint_line) {
             paste0(
               crayon::bold(lint_entry$r_file, ":",
                            as.character(lint_line$line_number), ":",
                            as.character(lint_line$column_number), ": ", sep = ""),
               color(lint_line$type)(lint_line$type, ": ", sep = ""),
               crayon::bold(lint_line$message), "\n",
               lint_line$line, "\n",
               lintr:::highlight_string(lint_line$message, lint_line$column_number, lint_line$ranges),
               "\n",
               collapse = "")
           }),
    collapse = "")
}

if (num_issue > 0) {
  cat(sprintf('R linters found %d issues:\n', num_issue))
  for (entry in results) {
    if (length(entry$output)) {
      cat(paste0('**** ', crayon::bold(entry$r_file), '\n'))
      cat(paste0(lint2str(entry), collapse = ''))
    }
  }
  quit(save = 'no', status = 1) # Signal error to parent shell
}
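The driver exits with a non-zero status when any lint issue is found, so a CI job can simply run it and fail on issues; assuming the script is saved as run_lint.R (the name here is illustrative), the invocation from the package root would be roughly `Rscript run_lint.R`.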
@@ -1,4 +1,4 @@
library(testthat)
library(xgboost)

test_check("xgboost", reporter = ProgressReporter)
test_check("xgboost")

@@ -2,19 +2,19 @@ require(xgboost)

context("basic functions")

data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
set.seed(1994)

# disable some tests for Win32
windows_flag <- .Platform$OS.type == "windows" &&
windows_flag = .Platform$OS.type == "windows" &&
  .Machine$sizeof.pointer != 8
solaris_flag <- (Sys.info()['sysname'] == "SunOS")
solaris_flag = (Sys.info()['sysname'] == "SunOS")

test_that("train and predict binary classification", {
  nrounds <- 2
  nrounds = 2
  expect_output(
    bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
                   eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic")
@@ -30,55 +30,21 @@ test_that("train and predict binary classification", {

  pred1 <- predict(bst, train$data, ntreelimit = 1)
  expect_length(pred1, 6513)
  err_pred1 <- sum((pred1 > 0.5) != train$label) / length(train$label)
  err_pred1 <- sum((pred1 > 0.5) != train$label)/length(train$label)
  err_log <- bst$evaluation_log[1, train_error]
  expect_lt(abs(err_pred1 - err_log), 10e-6)
})

test_that("parameter validation works", {
  p <- list(foo = "bar")
  nrounds <- 1
  set.seed(1994)

  d <- cbind(
    x1 = rnorm(10),
    x2 = rnorm(10),
    x3 = rnorm(10))
  y <- d[, "x1"] + d[, "x2"]^2 +
    ifelse(d[, "x3"] > .5, d[, "x3"]^2, 2^d[, "x3"]) +
    rnorm(10)
  dtrain <- xgb.DMatrix(data = d, info = list(label = y))

  correct <- function() {
    params <- list(max_depth = 2, booster = "dart",
                   rate_drop = 0.5, one_drop = TRUE,
                   objective = "reg:squarederror")
    xgb.train(params = params, data = dtrain, nrounds = nrounds)
  }
  expect_silent(correct())
  incorrect <- function() {
    params <- list(max_depth = 2, booster = "dart",
                   rate_drop = 0.5, one_drop = TRUE,
                   objective = "reg:squarederror",
                   foo = "bar", bar = "foo")
    output <- capture.output(
      xgb.train(params = params, data = dtrain, nrounds = nrounds))
    print(output)
  }
  expect_output(incorrect(), "bar, foo")
})


test_that("dart prediction works", {
  nrounds <- 32
  nrounds = 32
  set.seed(1994)

  d <- cbind(
    x1 = rnorm(100),
    x2 = rnorm(100),
    x3 = rnorm(100))
  y <- d[, "x1"] + d[, "x2"]^2 +
    ifelse(d[, "x3"] > .5, d[, "x3"]^2, 2^d[, "x3"]) +
  y <- d[,"x1"] + d[,"x2"]^2 +
    ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
    rnorm(100)

  set.seed(1994)
@@ -87,23 +53,24 @@ test_that("dart prediction works", {
                   eta = 1, nthread = 2, nrounds = nrounds, objective = "reg:squarederror")
  pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0)
  pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds)
  expect_true(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_1, byrow = TRUE)))
  expect_true(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE)))

  pred_by_xgboost_2 <- predict(booster_by_xgboost, newdata = d, training = TRUE)
  expect_false(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_2, byrow = TRUE)))
  expect_false(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE)))

  set.seed(1994)
  dtrain <- xgb.DMatrix(data = d, info = list(label = y))
  booster_by_train <- xgb.train(params = list(
                                  booster = "dart",
                                  max_depth = 2,
                                  eta = 1,
                                  rate_drop = 0.5,
                                  one_drop = TRUE,
                                  nthread = 1,
                                  tree_method = "exact",
                                  objective = "reg:squarederror"
                                ),
  dtrain <- xgb.DMatrix(data=d, info = list(label=y))
  booster_by_train <- xgb.train( params = list(
                                   booster = "dart",
                                   max_depth = 2,
                                   eta = 1,
                                   rate_drop = 0.5,
                                   one_drop = TRUE,
                                   nthread = 1,
                                   tree_method= "exact",
                                   verbosity = 3,
                                   objective = "reg:squarederror"
                                 ),
                                data = dtrain,
                                nrounds = nrounds
  )
@@ -111,9 +78,9 @@ test_that("dart prediction works", {
  pred_by_train_1 <- predict(booster_by_train, newdata = dtrain, ntreelimit = nrounds)
  pred_by_train_2 <- predict(booster_by_train, newdata = dtrain, training = TRUE)

  expect_true(all(matrix(pred_by_train_0, byrow = TRUE) == matrix(pred_by_xgboost_0, byrow = TRUE)))
  expect_true(all(matrix(pred_by_train_1, byrow = TRUE) == matrix(pred_by_xgboost_1, byrow = TRUE)))
  expect_true(all(matrix(pred_by_train_2, byrow = TRUE) == matrix(pred_by_xgboost_2, byrow = TRUE)))
  expect_true(all(matrix(pred_by_train_0, byrow=TRUE) == matrix(pred_by_xgboost_0, byrow=TRUE)))
  expect_true(all(matrix(pred_by_train_1, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE)))
  expect_true(all(matrix(pred_by_train_2, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE)))
})

test_that("train and predict softprob", {
@@ -122,7 +89,7 @@ test_that("train and predict softprob", {
  expect_output(
    bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
                   max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5,
                   objective = "multi:softprob", num_class = 3)
                   objective = "multi:softprob", num_class=3)
  , "train-merror")
  expect_false(is.null(bst$evaluation_log))
  expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
@@ -130,17 +97,17 @@ test_that("train and predict softprob", {
  pred <- predict(bst, as.matrix(iris[, -5]))
  expect_length(pred, nrow(iris) * 3)
  # row sums add up to total probability of 1:
  expect_equal(rowSums(matrix(pred, ncol = 3, byrow = TRUE)), rep(1, nrow(iris)), tolerance = 1e-7)
  expect_equal(rowSums(matrix(pred, ncol=3, byrow=TRUE)), rep(1, nrow(iris)), tolerance = 1e-7)
  # manually calculate error at the last iteration:
  mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE)
  expect_equal(as.numeric(t(mpred)), pred)
  pred_labels <- max.col(mpred) - 1
  err <- sum(pred_labels != lb) / length(lb)
  err <- sum(pred_labels != lb)/length(lb)
  expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6)
  # manually calculate error at the 1st iteration:
  mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, ntreelimit = 1)
  pred_labels <- max.col(mpred) - 1
  err <- sum(pred_labels != lb) / length(lb)
  err <- sum(pred_labels != lb)/length(lb)
  expect_equal(bst$evaluation_log[1, train_merror], err, tolerance = 5e-6)
})

@@ -150,7 +117,7 @@ test_that("train and predict softmax", {
  expect_output(
    bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
                   max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5,
                   objective = "multi:softmax", num_class = 3)
                   objective = "multi:softmax", num_class=3)
  , "train-merror")
  expect_false(is.null(bst$evaluation_log))
  expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
@@ -158,7 +125,7 @@ test_that("train and predict softmax", {

  pred <- predict(bst, as.matrix(iris[, -5]))
  expect_length(pred, nrow(iris))
  err <- sum(pred != lb) / length(lb)
  err <- sum(pred != lb)/length(lb)
  expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6)
})

@@ -173,12 +140,12 @@ test_that("train and predict RF", {
  expect_equal(xgb.ntree(bst), 20)

  pred <- predict(bst, train$data)
  pred_err <- sum((pred > 0.5) != lb) / length(lb)
  pred_err <- sum((pred > 0.5) != lb)/length(lb)
  expect_lt(abs(bst$evaluation_log[1, train_error] - pred_err), 10e-6)
  #expect_lt(pred_err, 0.03)

  pred <- predict(bst, train$data, ntreelimit = 20)
  pred_err_20 <- sum((pred > 0.5) != lb) / length(lb)
  pred_err_20 <- sum((pred > 0.5) != lb)/length(lb)
  expect_equal(pred_err_20, pred_err)

  #pred <- predict(bst, train$data, ntreelimit = 1)
@@ -193,19 +160,19 @@ test_that("train and predict RF with softprob", {
  set.seed(11)
  bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
                 max_depth = 3, eta = 0.9, nthread = 2, nrounds = nrounds,
                 objective = "multi:softprob", num_class = 3, verbose = 0,
                 objective = "multi:softprob", num_class=3, verbose = 0,
                 num_parallel_tree = 4, subsample = 0.5, colsample_bytree = 0.5)
  expect_equal(bst$niter, 15)
  expect_equal(xgb.ntree(bst), 15 * 3 * 4)
  expect_equal(xgb.ntree(bst), 15*3*4)
  # predict for all iterations:
  pred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE)
  pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE)
  expect_equal(dim(pred), c(nrow(iris), 3))
  pred_labels <- max.col(pred) - 1
  err <- sum(pred_labels != lb) / length(lb)
  err <- sum(pred_labels != lb)/length(lb)
  expect_equal(bst$evaluation_log[nrounds, train_merror], err, tolerance = 5e-6)
  # predict for 7 iterations and adjust for 4 parallel trees per iteration
  pred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, ntreelimit = 7 * 4)
  err <- sum((max.col(pred) - 1) != lb) / length(lb)
  pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE, ntreelimit = 7 * 4)
  err <- sum((max.col(pred) - 1) != lb)/length(lb)
  expect_equal(bst$evaluation_log[7, train_merror], err, tolerance = 5e-6)
})

@@ -223,7 +190,7 @@ test_that("use of multiple eval metrics works", {

test_that("training continuation works", {
  dtrain <- xgb.DMatrix(train$data, label = train$label)
  watchlist <- list(train = dtrain)
  watchlist = list(train=dtrain)
  param <- list(objective = "binary:logistic", max_depth = 2, eta = 1, nthread = 2)

  # for the reference, use 4 iterations at once:
@@ -252,28 +219,13 @@ test_that("training continuation works", {
  expect_equal(dim(bst2$evaluation_log), c(2, 2))
})

test_that("model serialization works", {
  out_path <- "model_serialization"
  dtrain <- xgb.DMatrix(train$data, label = train$label)
  watchlist <- list(train = dtrain)
  param <- list(objective = "binary:logistic")
  booster <- xgb.train(param, dtrain, nrounds = 4, watchlist)
  raw <- xgb.serialize(booster)
  saveRDS(raw, out_path)
  raw <- readRDS(out_path)

  loaded <- xgb.unserialize(raw)
  raw_from_loaded <- xgb.serialize(loaded)
  expect_equal(raw, raw_from_loaded)
  file.remove(out_path)
})

test_that("xgb.cv works", {
  set.seed(11)
  expect_output(
    cv <- xgb.cv(data = train$data, label = train$label, max_depth = 2, nfold = 5,
                 eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
                 verbose = TRUE)
                 verbose=TRUE)
  , "train-error:")
  expect_is(cv, 'xgb.cv.synchronous')
  expect_false(is.null(cv$evaluation_log))
@@ -292,11 +244,11 @@ test_that("xgb.cv works with stratified folds", {
  set.seed(314159)
  cv <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5,
               eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
               verbose = TRUE, stratified = FALSE)
               verbose=TRUE, stratified = FALSE)
  set.seed(314159)
  cv2 <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5,
                eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
                verbose = TRUE, stratified = TRUE)
                verbose=TRUE, stratified = TRUE)
  # Stratified folds should result in different evaluation logs
  expect_true(all(cv$evaluation_log[, test_error_mean] != cv2$evaluation_log[, test_error_mean]))
})
@@ -319,7 +271,7 @@ test_that("train and predict with non-strict classes", {
  expect_equal(pr0, pr)

  # dense matrix-like input of non-matrix class with some inheritance
  class(train_dense) <- c('pphmatrix', 'shmatrix')
  class(train_dense) <- c('pphmatrix','shmatrix')
  expect_true(is.matrix(train_dense))
  expect_error(
    bst <- xgboost(data = train_dense, label = train$label, max_depth = 2,
@@ -337,15 +289,15 @@ test_that("train and predict with non-strict classes", {
test_that("max_delta_step works", {
  dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
  watchlist <- list(train = dtrain)
  param <- list(objective = "binary:logistic", eval_metric = "logloss", max_depth = 2, nthread = 2, eta = 0.5)
  nrounds <- 5
  param <- list(objective = "binary:logistic", eval_metric="logloss", max_depth = 2, nthread = 2, eta = 0.5)
  nrounds = 5
  # model with no restriction on max_delta_step
  bst1 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1)
  # model with restricted max_delta_step
  bst2 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1, max_delta_step = 1)
  # the no-restriction model is expected to have consistently lower loss during the initial iterations
  expect_true(all(bst1$evaluation_log$train_logloss < bst2$evaluation_log$train_logloss))
  expect_lt(mean(bst1$evaluation_log$train_logloss) / mean(bst2$evaluation_log$train_logloss), 0.8)
  expect_lt(mean(bst1$evaluation_log$train_logloss)/mean(bst2$evaluation_log$train_logloss), 0.8)
})

test_that("colsample_bytree works", {
@@ -357,28 +309,18 @@ test_that("colsample_bytree works", {
  test_y <- as.numeric(rowSums(test_x) > 0)
  colnames(train_x) <- paste0("Feature_", sprintf("%03d", 1:100))
  colnames(test_x) <- paste0("Feature_", sprintf("%03d", 1:100))
  dtrain <- xgb.DMatrix(train_x, label = train_y)
  dtrain <- xgb.DMatrix(train_x, label = train_y)
  dtest <- xgb.DMatrix(test_x, label = test_y)
  watchlist <- list(train = dtrain, eval = dtest)
  ## Use colsample_bytree = 0.01, so that roughly one out of 100 features is chosen for
  ## each tree
  param <- list(max_depth = 2, eta = 0, nthread = 2,
  # Use colsample_bytree = 0.01, so that roughly one out of 100 features is
  # chosen for each tree
  param <- list(max_depth = 2, eta = 0, silent = 1, nthread = 2,
                colsample_bytree = 0.01, objective = "binary:logistic",
                eval_metric = "auc")
  set.seed(2)
  set.seed(2)
  bst <- xgb.train(param, dtrain, nrounds = 100, watchlist, verbose = 0)
  xgb.importance(model = bst)
  # If colsample_bytree works properly, a variety of features should be used
  # in the 100 trees
  expect_gte(nrow(xgb.importance(model = bst)), 30)
})

test_that("Configuration works", {
  bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
                 eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic",
                 eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss")
  config <- xgb.config(bst)
  xgb.config(bst) <- config
  reloaded_config <- xgb.config(bst)
  expect_equal(config, reloaded_config)
})

@@ -5,8 +5,8 @@ require(data.table)

context("callbacks")

data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test

@@ -21,25 +21,25 @@ ltrain <- add.noise(train$label, 0.2)
ltest <- add.noise(test$label, 0.2)
dtrain <- xgb.DMatrix(train$data, label = ltrain)
dtest <- xgb.DMatrix(test$data, label = ltest)
watchlist <- list(train = dtrain, test = dtest)
watchlist = list(train=dtrain, test=dtest)


err <- function(label, pr) sum((pr > 0.5) != label) / length(label)
err <- function(label, pr) sum((pr > 0.5) != label)/length(label)

param <- list(objective = "binary:logistic", max_depth = 2, nthread = 2)


test_that("cb.print.evaluation works as expected", {

  bst_evaluation <- c('train-auc' = 0.9, 'test-auc' = 0.8)

  bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8)
  bst_evaluation_err <- NULL
  begin_iteration <- 1
  end_iteration <- 7

  f0 <- cb.print.evaluation(period = 0)
  f1 <- cb.print.evaluation(period = 1)
  f5 <- cb.print.evaluation(period = 5)


  f0 <- cb.print.evaluation(period=0)
  f1 <- cb.print.evaluation(period=1)
  f5 <- cb.print.evaluation(period=5)

  expect_false(is.null(attr(f1, 'call')))
  expect_equal(attr(f1, 'name'), 'cb.print.evaluation')

@@ -48,60 +48,60 @@ test_that("cb.print.evaluation works as expected", {
  expect_output(f1(), "\\[1\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
  expect_output(f5(), "\\[1\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
  expect_null(f1())


  iteration <- 2
  expect_output(f1(), "\\[2\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
  expect_silent(f5())


  iteration <- 7
  expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
  expect_output(f5(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000")

  bst_evaluation_err <- c('train-auc' = 0.1, 'test-auc' = 0.2)

  bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2)
  expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\\+0.100000\ttest-auc:0.800000\\+0.200000")
})

test_that("cb.evaluation.log works as expected", {

  bst_evaluation <- c('train-auc' = 0.9, 'test-auc' = 0.8)
  bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8)
  bst_evaluation_err <- NULL


  evaluation_log <- list()
  f <- cb.evaluation.log()


  expect_false(is.null(attr(f, 'call')))
  expect_equal(attr(f, 'name'), 'cb.evaluation.log')


  iteration <- 1
  expect_silent(f())
  expect_equal(evaluation_log,
               list(c(iter = 1, bst_evaluation)))
  expect_equal(evaluation_log,
               list(c(iter=1, bst_evaluation)))
  iteration <- 2
  expect_silent(f())
  expect_equal(evaluation_log,
               list(c(iter = 1, bst_evaluation), c(iter = 2, bst_evaluation)))
  expect_equal(evaluation_log,
               list(c(iter=1, bst_evaluation), c(iter=2, bst_evaluation)))
  expect_silent(f(finalize = TRUE))
  expect_equal(evaluation_log,
               data.table(iter = 1:2, train_auc = c(0.9, 0.9), test_auc = c(0.8, 0.8)))

  bst_evaluation_err <- c('train-auc' = 0.1, 'test-auc' = 0.2)
  expect_equal(evaluation_log,
               data.table(iter=1:2, train_auc=c(0.9,0.9), test_auc=c(0.8,0.8)))

  bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2)
  evaluation_log <- list()
  f <- cb.evaluation.log()


  iteration <- 1
  expect_silent(f())
  expect_equal(evaluation_log,
               list(c(iter = 1, c(bst_evaluation, bst_evaluation_err))))
  expect_equal(evaluation_log,
               list(c(iter=1, c(bst_evaluation, bst_evaluation_err))))
  iteration <- 2
  expect_silent(f())
  expect_equal(evaluation_log,
               list(c(iter = 1, c(bst_evaluation, bst_evaluation_err)),
                    c(iter = 2, c(bst_evaluation, bst_evaluation_err))))
  expect_equal(evaluation_log,
               list(c(iter=1, c(bst_evaluation, bst_evaluation_err)),
                    c(iter=2, c(bst_evaluation, bst_evaluation_err))))
  expect_silent(f(finalize = TRUE))
  expect_equal(evaluation_log,
               data.table(iter = 1:2,
                          train_auc_mean = c(0.9, 0.9), train_auc_std = c(0.1, 0.1),
                          test_auc_mean = c(0.8, 0.8), test_auc_std = c(0.2, 0.2)))
  expect_equal(evaluation_log,
               data.table(iter=1:2,
                          train_auc_mean=c(0.9,0.9), train_auc_std=c(0.1,0.1),
                          test_auc_mean=c(0.8,0.8), test_auc_std=c(0.2,0.2)))
})


@@ -130,18 +130,18 @@ test_that("cb.reset.parameters works as expected", {
  bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
                    callbacks = list(cb.reset.parameters(my_par)))
  expect_false(is.null(bst1$evaluation_log$train_error))
  expect_equal(bst0$evaluation_log$train_error,
  expect_equal(bst0$evaluation_log$train_error,
               bst1$evaluation_log$train_error)


  # same eta but re-set via a function in the callback
  set.seed(111)
  my_par <- list(eta = function(itr, itr_end) 0.9)
  bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
                    callbacks = list(cb.reset.parameters(my_par)))
  expect_false(is.null(bst2$evaluation_log$train_error))
  expect_equal(bst0$evaluation_log$train_error,
  expect_equal(bst0$evaluation_log$train_error,
               bst2$evaluation_log$train_error)


  # different eta re-set as a vector parameter in the callback
  set.seed(111)
  my_par <- list(eta = c(0.6, 0.5))
@@ -149,7 +149,7 @@ test_that("cb.reset.parameters works as expected", {
                    callbacks = list(cb.reset.parameters(my_par)))
  expect_false(is.null(bst3$evaluation_log$train_error))
  expect_false(all(bst0$evaluation_log$train_error == bst3$evaluation_log$train_error))


  # resetting multiple parameters at the same time runs with no error
  my_par <- list(eta = c(1., 0.5), gamma = c(1, 2), max_depth = c(4, 8))
  expect_error(
@@ -175,7 +175,7 @@ test_that("cb.reset.parameters works as expected", {
test_that("cb.save.model works as expected", {
  files <- c('xgboost_01.model', 'xgboost_02.model', 'xgboost.model')
  for (f in files) if (file.exists(f)) file.remove(f)


  bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0,
                   save_period = 1, save_name = "xgboost_%02d.model")
  expect_true(file.exists('xgboost_01.model'))
@@ -184,9 +184,6 @@ test_that("cb.save.model works as expected", {
  expect_equal(xgb.ntree(b1), 1)
  b2 <- xgb.load('xgboost_02.model')
  expect_equal(xgb.ntree(b2), 2)

  xgb.config(b2) <- xgb.config(bst)
  expect_equal(xgb.config(bst), xgb.config(b2))
  expect_equal(bst$raw, b2$raw)

  # save_period = 0 saves the last iteration's model
@@ -194,9 +191,8 @@ test_that("cb.save.model works as expected", {
                   save_period = 0)
  expect_true(file.exists('xgboost.model'))
  b2 <- xgb.load('xgboost.model')
  xgb.config(b2) <- xgb.config(bst)
  expect_equal(bst$raw, b2$raw)


  for (f in files) if (file.exists(f)) file.remove(f)
})

@@ -215,29 +211,20 @@ test_that("early stopping xgb.train works", {
  err_pred <- err(ltest, pred)
  err_log <- bst$evaluation_log[bst$best_iteration, test_error]
  expect_equal(err_log, err_pred, tolerance = 5e-6)


  set.seed(11)
  expect_silent(
    bst0 <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.3,
                      early_stopping_rounds = 3, maximize = FALSE, verbose = 0)
  )
  expect_equal(bst$evaluation_log, bst0$evaluation_log)

  xgb.save(bst, "model.bin")
  loaded <- xgb.load("model.bin")

  expect_false(is.null(loaded$best_iteration))
  expect_equal(loaded$best_iteration, bst$best_ntreelimit)
  expect_equal(loaded$best_ntreelimit, bst$best_ntreelimit)

  file.remove("model.bin")
})

test_that("early stopping using a specific metric works", {
  set.seed(11)
  expect_output(
    bst <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.6,
                     eval_metric = "logloss", eval_metric = "auc",
                     eval_metric="logloss", eval_metric="auc",
                     callbacks = list(cb.early.stop(stopping_rounds = 3, maximize = FALSE,
                                                    metric_name = 'test_logloss')))
, "Stopping. Best iteration")
|
||||
@@ -267,12 +254,12 @@ test_that("early stopping xgb.cv works", {

test_that("prediction in xgb.cv works", {
  set.seed(11)
  nrounds <- 4
  nrounds = 4
  cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.5, nrounds = nrounds, prediction = TRUE, verbose = 0)
  expect_false(is.null(cv$evaluation_log))
  expect_false(is.null(cv$pred))
  expect_length(cv$pred, nrow(train$data))
  err_pred <- mean(sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))))
  err_pred <- mean( sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))) )
  err_log <- cv$evaluation_log[nrounds, test_error_mean]
  expect_equal(err_pred, err_log, tolerance = 1e-6)

@@ -301,14 +288,14 @@ test_that("prediction in early-stopping xgb.cv works", {
               early_stopping_rounds = 5, maximize = FALSE, stratified = FALSE,
               prediction = TRUE)
, "Stopping. Best iteration")
|
||||


  expect_false(is.null(cv$best_iteration))
  expect_lt(cv$best_iteration, 19)
  expect_false(is.null(cv$evaluation_log))
  expect_false(is.null(cv$pred))
  expect_length(cv$pred, nrow(train$data))

  err_pred <- mean(sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))))

  err_pred <- mean( sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))) )
  err_log <- cv$evaluation_log[cv$best_iteration, test_error_mean]
  expect_equal(err_pred, err_log, tolerance = 1e-6)
  err_log_last <- cv$evaluation_log[cv$niter, test_error_mean]

@@ -4,8 +4,8 @@ require(xgboost)

set.seed(1994)

data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
watchlist <- list(eval = dtest, train = dtrain)
@@ -20,12 +20,12 @@ logregobj <- function(preds, dtrain) {

evalerror <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  err <- as.numeric(sum(labels != (preds > 0.5))) / length(labels)
  err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
  return(list(metric = "error", value = err))
}

param <- list(max_depth = 2, eta = 1, nthread = 2,
              objective = logregobj, eval_metric = evalerror)
param <- list(max_depth=2, eta=1, nthread = 2,
              objective=logregobj, eval_metric=evalerror)
num_round <- 2

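For context, the logregobj referenced in param is the usual custom logistic-regression objective: given margin predictions it returns the gradient and hessian of the log loss. A minimal sketch, consistent with the grad/hess lines visible in the DMatrix-attribute test below (the exact body is elided from this hunk):

logregobj <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  # map margins to probabilities with the logistic function
  preds <- 1 / (1 + exp(-preds))
  grad <- preds - labels       # first derivative of the log loss
  hess <- preds * (1 - preds)  # second derivative of the log loss
  return(list(grad = grad, hess = hess))
}
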
test_that("custom objective works", {
|
||||
@@ -37,19 +37,12 @@ test_that("custom objective works", {
|
||||
})
|
||||
|
||||
test_that("custom objective in CV works", {
|
||||
cv <- xgb.cv(param, dtrain, num_round, nfold = 10, verbose = FALSE)
|
||||
cv <- xgb.cv(param, dtrain, num_round, nfold=10, verbose=FALSE)
|
||||
expect_false(is.null(cv$evaluation_log))
|
||||
expect_equal(dim(cv$evaluation_log), c(2, 5))
|
||||
expect_lt(cv$evaluation_log[num_round, test_error_mean], 0.03)
|
||||
})
|
||||
|
||||
test_that("custom objective with early stop works", {
|
||||
bst <- xgb.train(param, dtrain, 10, watchlist)
|
||||
expect_equal(class(bst), "xgb.Booster")
|
||||
train_log <- bst$evaluation_log$train_error
|
||||
  expect_true(all(diff(train_log) <= 0))
})

test_that("custom objective using DMatrix attr works", {

  attr(dtrain, 'label') <- getinfo(dtrain, 'label')
@@ -61,28 +54,7 @@ test_that("custom objective using DMatrix attr works", {
    hess <- preds * (1 - preds)
    return(list(grad = grad, hess = hess))
  }
  param$objective <- logregobjattr
  param$objective = logregobjattr
  bst <- xgb.train(param, dtrain, num_round, watchlist)
  expect_equal(class(bst), "xgb.Booster")
})

test_that("custom objective with multi-class works", {
  data <- as.matrix(iris[, -5])
  label <- as.numeric(iris$Species) - 1
  dtrain <- xgb.DMatrix(data = data, label = label)
  nclasses <- 3

  fake_softprob <- function(preds, dtrain) {
    expect_true(all(matrix(preds) == 0.5))
    grad <- rnorm(dim(as.matrix(preds))[1])
    expect_equal(dim(data)[1] * nclasses, dim(as.matrix(preds))[1])
    hess <- rnorm(dim(as.matrix(preds))[1])
    return (list(grad = grad, hess = hess))
  }
  fake_merror <- function(preds, dtrain) {
    expect_equal(dim(data)[1] * nclasses, dim(as.matrix(preds))[1])
  }
  param$objective <- fake_softprob
  param$eval_metric <- fake_merror
  bst <- xgb.train(param, dtrain, 1, num_class = nclasses)
})

@@ -3,29 +3,29 @@ require(Matrix)

context("testing xgb.DMatrix functionality")

data(agaricus.test, package = 'xgboost')
test_data <- agaricus.test$data[1:100, ]
data(agaricus.test, package='xgboost')
test_data <- agaricus.test$data[1:100,]
test_label <- agaricus.test$label[1:100]

test_that("xgb.DMatrix: basic construction", {
  # from sparse matrix
  dtest1 <- xgb.DMatrix(test_data, label = test_label)
  dtest1 <- xgb.DMatrix(test_data, label=test_label)

  # from dense matrix
  dtest2 <- xgb.DMatrix(as.matrix(test_data), label = test_label)
  dtest2 <- xgb.DMatrix(as.matrix(test_data), label=test_label)
  expect_equal(getinfo(dtest1, 'label'), getinfo(dtest2, 'label'))
  expect_equal(dim(dtest1), dim(dtest2))

  #from dense integer matrix
  int_data <- as.matrix(test_data)
  storage.mode(int_data) <- "integer"
  dtest3 <- xgb.DMatrix(int_data, label = test_label)
  dtest3 <- xgb.DMatrix(int_data, label=test_label)
  expect_equal(dim(dtest1), dim(dtest3))
})

test_that("xgb.DMatrix: saving, loading", {
  # save to a local file
  dtest1 <- xgb.DMatrix(test_data, label = test_label)
  dtest1 <- xgb.DMatrix(test_data, label=test_label)
  tmp_file <- tempfile('xgb.DMatrix_')
  expect_true(xgb.DMatrix.save(dtest1, tmp_file))
  # read from a local file
@@ -35,12 +35,12 @@ test_that("xgb.DMatrix: saving, loading", {
  expect_equal(getinfo(dtest1, 'label'), getinfo(dtest3, 'label'))

  # from a libsvm text file
  tmp <- c("0 1:1 2:1", "1 3:1", "0 1:1")
  tmp <- c("0 1:1 2:1","1 3:1","0 1:1")
  tmp_file <- 'tmp.libsvm'
  writeLines(tmp, tmp_file)
  dtest4 <- xgb.DMatrix(tmp_file, silent = TRUE)
  expect_equal(dim(dtest4), c(3, 4))
  expect_equal(getinfo(dtest4, 'label'), c(0, 1, 0))
  expect_equal(getinfo(dtest4, 'label'), c(0,1,0))
  unlink(tmp_file)
})

@@ -50,57 +50,51 @@ test_that("xgb.DMatrix: getinfo & setinfo", {
  labels <- getinfo(dtest, 'label')
  expect_equal(test_label, getinfo(dtest, 'label'))

  expect_true(setinfo(dtest, 'label_lower_bound', test_label))
  expect_equal(test_label, getinfo(dtest, 'label_lower_bound'))

  expect_true(setinfo(dtest, 'label_upper_bound', test_label))
  expect_equal(test_label, getinfo(dtest, 'label_upper_bound'))

  expect_true(length(getinfo(dtest, 'weight')) == 0)
  expect_true(length(getinfo(dtest, 'base_margin')) == 0)

  expect_true(setinfo(dtest, 'weight', test_label))
  expect_true(setinfo(dtest, 'base_margin', test_label))
  expect_true(setinfo(dtest, 'group', c(50, 50)))
  expect_true(setinfo(dtest, 'group', c(50,50)))
  expect_error(setinfo(dtest, 'group', test_label))

  # providing character values will give a warning
  expect_warning(setinfo(dtest, 'weight', rep('a', nrow(test_data))))
  expect_warning( setinfo(dtest, 'weight', rep('a', nrow(test_data))) )

  # any other label should error
  expect_error(setinfo(dtest, 'asdf', test_label))
})

test_that("xgb.DMatrix: slice, dim", {
  dtest <- xgb.DMatrix(test_data, label = test_label)
  dtest <- xgb.DMatrix(test_data, label=test_label)
  expect_equal(dim(dtest), dim(test_data))
  dsub1 <- slice(dtest, 1:42)
  expect_equal(nrow(dsub1), 42)
  expect_equal(ncol(dsub1), ncol(test_data))

  dsub2 <- dtest[1:42, ]
  dsub2 <- dtest[1:42,]
  expect_equal(dim(dtest), dim(test_data))
  expect_equal(getinfo(dsub1, 'label'), getinfo(dsub2, 'label'))
})

test_that("xgb.DMatrix: slice, trailing empty rows", {
  data(agaricus.train, package = 'xgboost')
  data(agaricus.train, package='xgboost')
  train_data <- agaricus.train$data
  train_label <- agaricus.train$label
  dtrain <- xgb.DMatrix(data = train_data, label = train_label)
  dtrain <- xgb.DMatrix(data=train_data, label=train_label)
  slice(dtrain, 6513L)
  train_data[6513, ] <- 0
  dtrain <- xgb.DMatrix(data = train_data, label = train_label)
  dtrain <- xgb.DMatrix(data=train_data, label=train_label)
  slice(dtrain, 6513L)
  expect_equal(nrow(dtrain), 6513)
})

test_that("xgb.DMatrix: colnames", {
  dtest <- xgb.DMatrix(test_data, label = test_label)
  dtest <- xgb.DMatrix(test_data, label=test_label)
  expect_equal(colnames(dtest), colnames(test_data))
  expect_error(colnames(dtest) <- 'asdf')
  expect_error( colnames(dtest) <- 'asdf')
  new_names <- make.names(1:ncol(test_data))
  expect_silent(colnames(dtest) <- new_names)
  expect_silent( colnames(dtest) <- new_names)
  expect_equal(colnames(dtest), new_names)
  expect_silent(colnames(dtest) <- NULL)
  expect_null(colnames(dtest))
@@ -109,7 +103,7 @@ test_that("xgb.DMatrix: colnames", {
test_that("xgb.DMatrix: nrow is correct for a very sparse matrix", {
  set.seed(123)
  nr <- 1000
  x <- rsparsematrix(nr, 100, density = 0.0005)
  x <- rsparsematrix(nr, 100, density=0.0005)
  # we want it very sparse, so that last rows are empty
  expect_lt(max(x@i), nr)
  dtest <- xgb.DMatrix(x)

@@ -3,8 +3,8 @@ require(xgboost)
context("Garbage Collection Safety Check")

test_that("train and prediction when gctorture is on", {
  data(agaricus.train, package = 'xgboost')
  data(agaricus.test, package = 'xgboost')
  data(agaricus.train, package='xgboost')
  data(agaricus.test, package='xgboost')
  train <- agaricus.train
  test <- agaricus.test
  gctorture(TRUE)

@@ -3,8 +3,8 @@ context('Test generalized linear models')
require(xgboost)

test_that("gblinear works", {
  data(agaricus.train, package = 'xgboost')
  data(agaricus.test, package = 'xgboost')
  data(agaricus.train, package='xgboost')
  data(agaricus.test, package='xgboost')
  dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
  dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)

@@ -16,7 +16,7 @@ test_that("gblinear works", {
  ERR_UL <- 0.005 # upper limit for the test set error
  VERB <- 0 # chatterbox switch

  param$updater <- 'shotgun'
  param$updater = 'shotgun'
  bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'shuffle')
  ypred <- predict(bst, dtest)
  expect_equal(length(getinfo(dtest, 'label')), 1611)
@@ -29,7 +29,7 @@ test_that("gblinear works", {
  expect_equal(dim(h), c(n, ncol(dtrain) + 1))
  expect_is(h, "matrix")

  param$updater <- 'coord_descent'
  param$updater = 'coord_descent'
  bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'cyclic')
  expect_lt(bst$evaluation_log$eval_error[n], ERR_UL)

@@ -40,7 +40,7 @@ test_that("gblinear works", {
  expect_lt(bst$evaluation_log$eval_error[2], ERR_UL)

  bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'thrifty',
                   top_k = 50, callbacks = list(cb.gblinear.history(sparse = TRUE)))
                   top_n = 50, callbacks = list(cb.gblinear.history(sparse = TRUE)))
  expect_lt(bst$evaluation_log$eval_error[n], ERR_UL)
  h <- xgb.gblinear.history(bst)
  expect_equal(dim(h), c(n, ncol(dtrain) + 1))

@@ -5,18 +5,18 @@ require(data.table)
require(Matrix)
require(vcd, quietly = TRUE)

float_tolerance <- 5e-6
float_tolerance = 5e-6

# disable some tests for 32-bit environment
flag_32bit <- .Machine$sizeof.pointer != 8
# disable some tests for Win32
win32_flag = .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8

set.seed(1982)
data(Arthritis)
df <- data.table(Arthritis, keep.rownames = FALSE)
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
df[, ID := NULL]
sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df) # nolint
df <- data.table(Arthritis, keep.rownames = F)
df[,AgeDiscret := as.factor(round(Age / 10,0))]
df[,AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
df[,ID := NULL]
sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df)
label <- df[, ifelse(Improved == "Marked", 1, 0)]

# binary
@@ -44,17 +44,17 @@ mbst.GLM <- xgboost(data = as.matrix(iris[, -5]), label = mlabel, verbose = 0,


test_that("xgb.dump works", {
  if (!flag_32bit)
  if (!win32_flag)
    expect_length(xgb.dump(bst.Tree), 200)
  dump_file <- file.path(tempdir(), 'xgb.model.dump')
  expect_true(xgb.dump(bst.Tree, dump_file, with_stats = TRUE))
  dump_file = file.path(tempdir(), 'xgb.model.dump')
  expect_true(xgb.dump(bst.Tree, dump_file, with_stats = T))
  expect_true(file.exists(dump_file))
  expect_gt(file.size(dump_file), 8000)

  # JSON format
  dmp <- xgb.dump(bst.Tree, dump_format = "json")
  expect_length(dmp, 1)
  if (!flag_32bit)
  if (!win32_flag)
    expect_length(grep('nodeid', strsplit(dmp, '\n')[[1]]), 188)
})

@@ -63,7 +63,7 @@ test_that("xgb.dump works for gblinear", {
  # also make sure that it works properly for a sparse model where some coefficients
  # are 0 from setting large L1 regularization:
  bst.GLM.sp <- xgboost(data = sparse_matrix, label = label, eta = 1, nthread = 2, nrounds = 1,
                        alpha = 2, objective = "binary:logistic", booster = "gblinear")
                        alpha=2, objective = "binary:logistic", booster = "gblinear")
  d.sp <- xgb.dump(bst.GLM.sp)
  expect_length(d.sp, 14)
  expect_gt(sum(d.sp == "0"), 0)
@@ -110,9 +110,9 @@ test_that("predict feature contributions works", {
  pred <- predict(bst.GLM, sparse_matrix, outputmargin = TRUE)
  expect_lt(max(abs(rowSums(pred_contr) - pred)), 1e-5)
  # manual calculation of linear terms
  coefs <- xgb.dump(bst.GLM)[-c(1, 2, 4)] %>% as.numeric
  coefs <- xgb.dump(bst.GLM)[-c(1,2,4)] %>% as.numeric
  coefs <- c(coefs[-1], coefs[1]) # intercept must be the last
  pred_contr_manual <- sweep(cbind(sparse_matrix, 1), 2, coefs, FUN = "*")
  pred_contr_manual <- sweep(cbind(sparse_matrix, 1), 2, coefs, FUN="*")
  expect_equal(as.numeric(pred_contr), as.numeric(pred_contr_manual),
               tolerance = float_tolerance)

@@ -130,13 +130,13 @@ test_that("predict feature contributions works", {
  pred <- predict(mbst.GLM, as.matrix(iris[, -5]), outputmargin = TRUE, reshape = TRUE)
  pred_contr <- predict(mbst.GLM, as.matrix(iris[, -5]), predcontrib = TRUE)
  expect_length(pred_contr, 3)
  coefs_all <- xgb.dump(mbst.GLM)[-c(1, 2, 6)] %>% as.numeric %>% matrix(ncol = 3, byrow = TRUE)
  coefs_all <- xgb.dump(mbst.GLM)[-c(1,2,6)] %>% as.numeric %>% matrix(ncol = 3, byrow = TRUE)
  for (g in seq_along(pred_contr)) {
    expect_equal(colnames(pred_contr[[g]]), c(colnames(iris[, -5]), "BIAS"))
    expect_lt(max(abs(rowSums(pred_contr[[g]]) - pred[, g])), float_tolerance)
    # manual calculation of linear terms
    coefs <- c(coefs_all[-1, g], coefs_all[1, g]) # intercept needs to be the last
    pred_contr_manual <- sweep(as.matrix(cbind(iris[, -5], 1)), 2, coefs, FUN = "*")
    pred_contr_manual <- sweep(as.matrix(cbind(iris[,-5], 1)), 2, coefs, FUN="*")
    expect_equal(as.numeric(pred_contr[[g]]), as.numeric(pred_contr_manual),
                 tolerance = float_tolerance)
  }
@@ -147,8 +147,8 @@ test_that("SHAPs sum to predictions, with or without DART", {
    x1 = rnorm(100),
    x2 = rnorm(100),
    x3 = rnorm(100))
  y <- d[, "x1"] + d[, "x2"]^2 +
    ifelse(d[, "x3"] > .5, d[, "x3"]^2, 2^d[, "x3"]) +
  y <- d[,"x1"] + d[,"x2"]^2 +
    ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
    rnorm(100)
  nrounds <- 30

@@ -160,7 +160,7 @@ test_that("SHAPs sum to predictions, with or without DART", {
        objective = "reg:squarederror",
        eval_metric = "rmse"),
      if (booster == "dart")
        list(rate_drop = .01, one_drop = TRUE)),
        list(rate_drop = .01, one_drop = T)),
      data = d,
      label = y,
      nrounds = nrounds)
@@ -168,21 +168,21 @@ test_that("SHAPs sum to predictions, with or without DART", {
    pr <- function(...)
      predict(fit, newdata = d, ...)
    pred <- pr()
    shap <- pr(predcontrib = TRUE)
    shapi <- pr(predinteraction = TRUE)
    tol <- 1e-5
    shap <- pr(predcontrib = T)
    shapi <- pr(predinteraction = T)
    tol = 1e-5

    expect_equal(rowSums(shap), pred, tol = tol)
    expect_equal(apply(shapi, 1, sum), pred, tol = tol)
    for (i in 1 : nrow(d))
      for (f in list(rowSums, colSums))
        expect_equal(f(shapi[i, , ]), shap[i, ], tol = tol)
        expect_equal(f(shapi[i,,]), shap[i,], tol = tol)
  }
})

test_that("xgb-attribute functionality", {
  val <- "my attribute value"
  list.val <- list(my_attr = val, a = 123, b = 'ok')
  list.val <- list(my_attr=val, a=123, b='ok')
  list.ch <- list.val[order(names(list.val))]
  list.ch <- lapply(list.ch, as.character)
  # note: iter is 0-index in xgb attributes
@@ -208,9 +208,9 @@ test_that("xgb-attribute functionality", {
  xgb.attr(bst, "my_attr") <- NULL
  expect_null(xgb.attr(bst, "my_attr"))
  expect_equal(xgb.attributes(bst), list.ch[c("a", "b", "niter")])
  xgb.attributes(bst) <- list(a = NULL, b = NULL)
  xgb.attributes(bst) <- list(a=NULL, b=NULL)
  expect_equal(xgb.attributes(bst), list.default)
  xgb.attributes(bst) <- list(niter = NULL)
  xgb.attributes(bst) <- list(niter=NULL)
  expect_null(xgb.attributes(bst))
})

@@ -256,7 +256,7 @@ test_that("xgb.model.dt.tree works with and without feature names", {
  names.dt.trees <- c("Tree", "Node", "ID", "Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
  dt.tree <- xgb.model.dt.tree(feature_names = feature.names, model = bst.Tree)
  expect_equal(names.dt.trees, names(dt.tree))
  if (!flag_32bit)
  if (!win32_flag)
    expect_equal(dim(dt.tree), c(188, 10))
  expect_output(str(dt.tree), 'Feature.*\\"Age\\"')

@@ -268,7 +268,7 @@ test_that("xgb.model.dt.tree works with and without feature names", {
  bst.Tree.x$feature_names <- NULL
  dt.tree.x <- xgb.model.dt.tree(model = bst.Tree.x)
  expect_output(str(dt.tree.x), 'Feature.*\\"3\\"')
  expect_equal(dt.tree[, -4, with = FALSE], dt.tree.x[, -4, with = FALSE])
  expect_equal(dt.tree[, -4, with=FALSE], dt.tree.x[, -4, with=FALSE])

  # using integer node ID instead of character
  dt.tree.int <- xgb.model.dt.tree(model = bst.Tree, use_int_id = TRUE)
@@ -283,7 +283,7 @@ test_that("xgb.model.dt.tree throws error for gblinear", {

test_that("xgb.importance works with and without feature names", {
  importance.Tree <- xgb.importance(feature_names = feature.names, model = bst.Tree)
  if (!flag_32bit)
  if (!win32_flag)
    expect_equal(dim(importance.Tree), c(7, 4))
  expect_equal(colnames(importance.Tree), c("Feature", "Gain", "Cover", "Frequency"))
  expect_output(str(importance.Tree), 'Feature.*\\"Age\\"')
@@ -295,7 +295,7 @@ test_that("xgb.importance works with and without feature names", {
  bst.Tree.x <- bst.Tree
  bst.Tree.x$feature_names <- NULL
  importance.Tree.x <- xgb.importance(model = bst.Tree)
  expect_equal(importance.Tree[, -1, with = FALSE], importance.Tree.x[, -1, with = FALSE],
  expect_equal(importance.Tree[, -1, with=FALSE], importance.Tree.x[, -1, with=FALSE],
               tolerance = float_tolerance)

  imp2plot <- xgb.plot.importance(importance_matrix = importance.Tree)
@@ -305,7 +305,7 @@ test_that("xgb.importance works with and without feature names", {
  # for multiclass
  imp.Tree <- xgb.importance(model = mbst.Tree)
  expect_equal(dim(imp.Tree), c(4, 4))
  xgb.importance(model = mbst.Tree, trees = seq(from = 0, by = nclass, length.out = nrounds))
  xgb.importance(model = mbst.Tree, trees = seq(from=0, by=nclass, length.out=nrounds))
})

test_that("xgb.importance works with GLM model", {
@@ -320,7 +320,7 @@ test_that("xgb.importance works with GLM model", {
  # for multiclass
  imp.GLM <- xgb.importance(model = mbst.GLM)
  expect_equal(dim(imp.GLM), c(12, 3))
  expect_equal(imp.GLM$Class, rep(0:2, each = 4))
  expect_equal(imp.GLM$Class, rep(0:2, each=4))
})

test_that("xgb.model.dt.tree and xgb.importance work with a single split model", {

@@ -5,51 +5,34 @@ context("interaction constraints")
set.seed(1024)
x1 <- rnorm(1000, 1)
x2 <- rnorm(1000, 1)
x3 <- sample(c(1, 2, 3), size = 1000, replace = TRUE)
y <- x1 + x2 + x3 + x1 * x2 * x3 + rnorm(1000, 0.001) + 3 * sin(x1)
train <- matrix(c(x1, x2, x3), ncol = 3)
x3 <- sample(c(1,2,3), size=1000, replace=TRUE)
y <- x1 + x2 + x3 + x1*x2*x3 + rnorm(1000, 0.001) + 3*sin(x1)
train <- matrix(c(x1,x2,x3), ncol = 3)

test_that("interaction constraints for regression", {
  # Fit a model that only allows interaction between x1 and x2
  bst <- xgboost(data = train, label = y, max_depth = 3,
                 eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
                 interaction_constraints = list(c(0, 1)))

                 interaction_constraints = list(c(0,1)))

  # Set all observations to have the same x3 values then increment
  # by the same amount
  preds <- lapply(c(1, 2, 3), function(x){
    tmat <- matrix(c(x1, x2, rep(x, 1000)), ncol = 3)
    return(predict(bst, tmat))
  })
  preds <- lapply(c(1,2,3), function(x){
    tmat <- matrix(c(x1,x2,rep(x,1000)), ncol=3)
    return(predict(bst, tmat))
  })

  # Check incrementing x3 has the same effect on all observations
  # since x3 is constrained to be independent of x1 and x2
  # and all observations start off from the same x3 value
  diff1 <- preds[[2]] - preds[[1]]
  test1 <- all(abs(diff1 - diff1[1]) < 1e-4)

  diff2 <- preds[[3]] - preds[[2]]
  test2 <- all(abs(diff2 - diff2[1]) < 1e-4)

  diff1 <- preds[[2]] - preds[[1]]
  test1 <- all(abs(diff1 - diff1[1]) < 1e-4)

  diff2 <- preds[[3]] - preds[[2]]
  test2 <- all(abs(diff2 - diff2[1]) < 1e-4)

  expect_true({
    test1 & test2
}, "Interaction Contraint Satisfied")
|
||||
})

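Note that interaction_constraints uses 0-based feature indices: list(c(0, 1)) in the test above names the first two columns of train (x1 and x2), not R's 1-based positions. A small sketch for deriving constraint indices from column names, assuming a feature matrix with colnames set:

# train_matrix is a hypothetical named feature matrix.
# match() returns 1-based positions; subtract 1 for XGBoost's 0-based indexing.
constrained_feats <- c('x1', 'x2')
constraint <- list(match(constrained_feats, colnames(train_matrix)) - 1)
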
test_that("interaction constraints scientific representation", {
|
||||
rows <- 10
|
||||
## When number exceeds 1e5, R paste function uses scientific representation.
|
||||
## See: https://github.com/dmlc/xgboost/issues/5179
|
||||
cols <- 1e5 + 10
|
||||
|
||||
d <- matrix(rexp(rows, rate = .1), nrow = rows, ncol = cols)
|
||||
y <- rnorm(rows)
|
||||
|
||||
dtrain <- xgb.DMatrix(data = d, info = list(label = y))
|
||||
inc <- list(c(seq.int(from = 0, to = cols, by = 1)))
|
||||
|
||||
with_inc <- xgb.train(data = dtrain, tree_method = 'hist',
|
||||
interaction_constraints = inc, nrounds = 10)
|
||||
without_inc <- xgb.train(data = dtrain, tree_method = 'hist', nrounds = 10)
|
||||
expect_equal(xgb.save.raw(with_inc), xgb.save.raw(without_inc))
|
||||
|
||||
})
|
||||
|
||||
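The quirk this test guards against is easy to reproduce in plain R: coercing a number of 1e5 or more to character produces scientific notation, which would corrupt a constraint specification assembled with paste. For instance:

paste(99999)   # "99999"
paste(100000)  # "1e+05" -- the scientific form leaks into the string
format(100000, scientific = FALSE)  # "100000" -- one way to keep fixed notation
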
@@ -9,9 +9,9 @@ test_that("predict feature interactions works", {
  # simulate some binary data and a linear outcome with an interaction term
  N <- 1000
  P <- 5
-  X <- matrix(rbinom(N * P, 1, 0.5), ncol=P, dimnames = list(NULL, letters[1:P]))
+  X <- matrix(rbinom(N * P, 1, 0.5), ncol = P, dimnames = list(NULL, letters[1:P]))
  # center the data (as contributions are computed WRT feature means)
-  X <- scale(X, scale=FALSE)
+  X <- scale(X, scale = FALSE)

  # outcome without any interactions, without any noise:
  f <- function(x) 2 * x[, 1] - 3 * x[, 2]
@@ -23,14 +23,14 @@ test_that("predict feature interactions works", {
  y <- f_int(X)

  dm <- xgb.DMatrix(X, label = y)
-  param <- list(eta=0.1, max_depth=4, base_score=mean(y), lambda=0, nthread=2)
+  param <- list(eta = 0.1, max_depth = 4, base_score = mean(y), lambda = 0, nthread = 2)
  b <- xgb.train(param, dm, 100)

-  pred = predict(b, dm, outputmargin=TRUE)
+  pred <- predict(b, dm, outputmargin = TRUE)

  # SHAP contributions:
-  cont <- predict(b, dm, predcontrib=TRUE)
-  expect_equal(dim(cont), c(N, P+1))
+  cont <- predict(b, dm, predcontrib = TRUE)
+  expect_equal(dim(cont), c(N, P + 1))
  # make sure for each row they add up to marginal predictions
  max(abs(rowSums(cont) - pred)) %>% expect_lt(0.001)
  # Hand-construct the 'ground truth' feature contributions:
@@ -39,43 +39,43 @@ test_that("predict feature interactions works", {
    -3. * X[, 2] + 1. * X[, 2] * X[, 3], # attribute a HALF of the interaction term to feature #2
    1. * X[, 2] * X[, 3] # and another HALF of the interaction term to feature #3
  )
-  gt_cont <- cbind(gt_cont, matrix(0, nrow=N, ncol=P + 1 - 3))
+  gt_cont <- cbind(gt_cont, matrix(0, nrow = N, ncol = P + 1 - 3))
  # These should be relatively close:
  expect_lt(max(abs(cont - gt_cont)), 0.05)

  # SHAP interaction contributions:
-  intr <- predict(b, dm, predinteraction=TRUE)
-  expect_equal(dim(intr), c(N, P+1, P+1))
+  intr <- predict(b, dm, predinteraction = TRUE)
+  expect_equal(dim(intr), c(N, P + 1, P + 1))
  # check assigned colnames
  cn <- c(letters[1:P], "BIAS")
  expect_equal(dimnames(intr), list(NULL, cn, cn))

  # check the symmetry
-  max(abs(aperm(intr, c(1,3,2)) - intr)) %>% expect_lt(0.00001)
+  max(abs(aperm(intr, c(1, 3, 2)) - intr)) %>% expect_lt(0.00001)

  # sums WRT columns must be close to feature contributions
-  max(abs(apply(intr, c(1,2), sum) - cont)) %>% expect_lt(0.00001)
+  max(abs(apply(intr, c(1, 2), sum) - cont)) %>% expect_lt(0.00001)

  # diagonal terms for features 3,4,5 must be close to zero
  Reduce(max, sapply(3:P, function(i) max(abs(intr[, i, i])))) %>% expect_lt(0.05)

  # BIAS must have no interactions
-  max(abs(intr[, 1:P, P+1])) %>% expect_lt(0.00001)
+  max(abs(intr[, 1:P, P + 1])) %>% expect_lt(0.00001)

  # interactions other than 2 x 3 must be close to zero
  intr23 <- intr
-  intr23[,2,3] <- 0
-  Reduce(max, sapply(1:P, function(i) max(abs(intr23[, i, (i+1):(P+1)])))) %>% expect_lt(0.05)
+  intr23[, 2, 3] <- 0
+  Reduce(max, sapply(1:P, function(i) max(abs(intr23[, i, (i + 1):(P + 1)])))) %>% expect_lt(0.05)

  # Construct the 'ground truth' contributions of interactions directly from the linear terms:
-  gt_intr <- array(0, c(N, P+1, P+1))
-  gt_intr[,2,3] <- 1. * X[, 2] * X[, 3] # attribute a HALF of the interaction term to each symmetric element
-  gt_intr[,3,2] <- gt_intr[, 2, 3]
+  gt_intr <- array(0, c(N, P + 1, P + 1))
+  gt_intr[, 2, 3] <- 1. * X[, 2] * X[, 3] # attribute a HALF of the interaction term to each symmetric element
+  gt_intr[, 3, 2] <- gt_intr[, 2, 3]
  # merge-in the diagonal based on 'ground truth' feature contributions
-  intr_diag = gt_cont - apply(gt_intr, c(1,2), sum)
-  for(j in seq_len(P)) {
-    gt_intr[,j,j] = intr_diag[,j]
+  intr_diag <- gt_cont - apply(gt_intr, c(1, 2), sum)
+  for (j in seq_len(P)) {
+    gt_intr[, j, j] <- intr_diag[, j]
  }
  # These should be relatively close:
  expect_lt(max(abs(intr - gt_intr)), 0.1)

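The additivity property exercised above (per-row contributions plus the trailing BIAS column reproduce the margin prediction) is not specific to this synthetic setup. A minimal, self-contained sketch on the bundled agaricus data (the hyperparameters here are arbitrary choices of ours):

require(xgboost)
data(agaricus.train, package = 'xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 2, nrounds = 2, objective = 'binary:logistic', verbose = 0)
margin <- predict(bst, agaricus.train$data, outputmargin = TRUE)
contrib <- predict(bst, agaricus.train$data, predcontrib = TRUE)
# the last column of contrib is BIAS; row sums should match the margin
max(abs(rowSums(contrib) - margin))  # effectively 0, well under the 0.001 bound used above
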
@@ -107,7 +107,7 @@ test_that("SHAP contribution values are not NAN", {

  shaps <- as.data.frame(predict(fit,
                                 newdata = as.matrix(subset(d, fold == 1)[, ivs]),
-                                predcontrib = T))
+                                predcontrib = TRUE))
  result <- cbind(shaps, sum = rowSums(shaps), pred = predict(fit,
                  newdata = as.matrix(subset(d, fold == 1)[, ivs])))

@@ -116,26 +116,26 @@ test_that("SHAP contribution values are not NAN", {


test_that("multiclass feature interactions work", {
-  dm <- xgb.DMatrix(as.matrix(iris[,-5]), label=as.numeric(iris$Species)-1)
-  param <- list(eta=0.1, max_depth=4, objective='multi:softprob', num_class=3)
+  dm <- xgb.DMatrix(as.matrix(iris[, -5]), label = as.numeric(iris$Species) - 1)
+  param <- list(eta = 0.1, max_depth = 4, objective = 'multi:softprob', num_class = 3)
  b <- xgb.train(param, dm, 40)
-  pred = predict(b, dm, outputmargin=TRUE) %>% array(c(3, 150)) %>% t
+  pred <- predict(b, dm, outputmargin = TRUE) %>% array(c(3, 150)) %>% t

  # SHAP contributions:
-  cont <- predict(b, dm, predcontrib=TRUE)
+  cont <- predict(b, dm, predcontrib = TRUE)
  expect_length(cont, 3)
  # rewrap them as a 3d array
  cont <- unlist(cont) %>% array(c(150, 5, 3))
  # make sure for each row they add up to marginal predictions
-  max(abs(apply(cont, c(1,3), sum) - pred)) %>% expect_lt(0.001)
+  max(abs(apply(cont, c(1, 3), sum) - pred)) %>% expect_lt(0.001)

  # SHAP interaction contributions:
-  intr <- predict(b, dm, predinteraction=TRUE)
+  intr <- predict(b, dm, predinteraction = TRUE)
  expect_length(intr, 3)
  # rewrap them as a 4d array
  intr <- unlist(intr) %>% array(c(150, 5, 5, 3)) %>% aperm(c(4, 1, 2, 3)) # [grp, row, col, col]
  # check the symmetry
-  max(abs(aperm(intr, c(1,2,4,3)) - intr)) %>% expect_lt(0.00001)
+  max(abs(aperm(intr, c(1, 2, 4, 3)) - intr)) %>% expect_lt(0.00001)
  # sums WRT columns must be close to feature contributions
-  max(abs(apply(intr, c(1,2,3), sum) - aperm(cont, c(3,1,2)))) %>% expect_lt(0.00001)
+  max(abs(apply(intr, c(1, 2, 3), sum) - aperm(cont, c(3, 1, 2)))) %>% expect_lt(0.00001)
})

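The reason for the re-wrapping in this test: for a multiclass booster, predcontrib and predinteraction return one matrix (or array) per class rather than a single object. A short sketch of the reshaping with the iris dimensions used above (150 rows, 4 features plus BIAS, 3 classes), reusing b and dm from the test:

cont_list <- predict(b, dm, predcontrib = TRUE)     # list of 3, one matrix per class
cont_arr <- array(unlist(cont_list), c(150, 5, 3))  # [row, feature+BIAS, class]
cont_by_class <- aperm(cont_arr, c(3, 1, 2))        # [class, row, feature+BIAS]
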
27 R-package/tests/testthat/test_lint.R Normal file
@@ -0,0 +1,27 @@
context("Code is of high quality and lint free")
test_that("Code Lint", {
  skip_on_cran()
  skip_on_travis()
  skip_if_not_installed("lintr")
  my_linters <- list(
    absolute_paths_linter=lintr::absolute_paths_linter,
    assignment_linter=lintr::assignment_linter,
    closed_curly_linter=lintr::closed_curly_linter,
    commas_linter=lintr::commas_linter,
    # commented_code_linter=lintr::commented_code_linter,
    infix_spaces_linter=lintr::infix_spaces_linter,
    line_length_linter=lintr::line_length_linter,
    no_tab_linter=lintr::no_tab_linter,
    object_usage_linter=lintr::object_usage_linter,
    # snake_case_linter=lintr::snake_case_linter,
    # multiple_dots_linter=lintr::multiple_dots_linter,
    object_length_linter=lintr::object_length_linter,
    open_curly_linter=lintr::open_curly_linter,
    # single_quotes_linter=lintr::single_quotes_linter,
    spaces_inside_linter=lintr::spaces_inside_linter,
    spaces_left_parentheses_linter=lintr::spaces_left_parentheses_linter,
    trailing_blank_lines_linter=lintr::trailing_blank_lines_linter,
    trailing_whitespace_linter=lintr::trailing_whitespace_linter
  )
  # lintr::expect_lint_free(linters=my_linters) # uncomment this if you want to check code quality
})

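With expect_lint_free left commented out, the same linter set can still be run on demand. A hedged usage sketch (assumes an xgboost checkout with the working directory set to R-package; lint and lint_package are standard lintr entry points):

library(lintr)
lintr::lint("R/xgb.train.R", linters = my_linters)  # one file
lintr::lint_package(linters = my_linters)           # the whole package
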
@@ -1,78 +0,0 @@
require(xgboost)
require(jsonlite)
source('../generate_models_params.R')

context("Models from previous versions of XGBoost can be loaded")

metadata <- model_generator_metadata()

run_model_param_check <- function (config) {
  testthat::expect_equal(config$learner$learner_model_param$num_feature, '4')
  testthat::expect_equal(config$learner$learner_train_param$booster, 'gbtree')
}

get_num_tree <- function (booster) {
  dump <- xgb.dump(booster)
  m <- regexec('booster\\[[0-9]+\\]', dump, perl = TRUE)
  m <- regmatches(dump, m)
  num_tree <- Reduce('+', lapply(m, length))
  return (num_tree)
}

run_booster_check <- function (booster, name) {
  # If given a handle, we need to call xgb.Booster.complete() prior to using xgb.config().
  if (inherits(booster, "xgb.Booster") && xgboost:::is.null.handle(booster$handle)) {
    booster <- xgb.Booster.complete(booster)
  }
  config <- jsonlite::fromJSON(xgb.config(booster))
  run_model_param_check(config)
  if (name == 'cls') {
    testthat::expect_equal(get_num_tree(booster),
                           metadata$kForests * metadata$kRounds * metadata$kClasses)
    testthat::expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
    testthat::expect_equal(config$learner$learner_train_param$objective, 'multi:softmax')
    testthat::expect_equal(as.numeric(config$learner$learner_model_param$num_class),
                           metadata$kClasses)
  } else if (name == 'logit') {
    testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
    testthat::expect_equal(as.numeric(config$learner$learner_model_param$num_class), 0)
    testthat::expect_equal(config$learner$learner_train_param$objective, 'binary:logistic')
  } else if (name == 'ltr') {
    testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
    testthat::expect_equal(config$learner$learner_train_param$objective, 'rank:ndcg')
  } else {
    testthat::expect_equal(name, 'reg')
    testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
    testthat::expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
    testthat::expect_equal(config$learner$learner_train_param$objective, 'reg:squarederror')
  }
}

test_that("Models from previous versions of XGBoost can be loaded", {
  bucket <- 'xgboost-ci-jenkins-artifacts'
  region <- 'us-west-2'
  file_name <- 'xgboost_r_model_compatibility_test.zip'
  zipfile <- file.path(getwd(), file_name)
  model_dir <- file.path(getwd(), 'models')
  download.file(paste('https://', bucket, '.s3-', region, '.amazonaws.com/', file_name, sep = ''),
                destfile = zipfile, mode = 'wb')
  unzip(zipfile, overwrite = TRUE)

  pred_data <- xgb.DMatrix(matrix(c(0, 0, 0, 0), nrow = 1, ncol = 4))

  lapply(list.files(model_dir), function (x) {
    model_file <- file.path(model_dir, x)
    m <- regexec("xgboost-([0-9\\.]+)\\.([a-z]+)\\.[a-z]+", model_file, perl = TRUE)
    m <- regmatches(model_file, m)[[1]]
    model_xgb_ver <- m[2]
    name <- m[3]

    if (endsWith(model_file, '.rds')) {
      booster <- readRDS(model_file)
    } else {
      booster <- xgb.load(model_file)
    }
    predict(booster, newdata = pred_data)
    run_booster_check(booster, name)
  })
})

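get_num_tree() in the deleted file above counts trees by matching the booster[i] headers that xgb.dump() emits, one per tree. A base-R illustration with a hand-written dump (these dump lines are fabricated for shape, not taken from a real model):

dump <- c("booster[0]",
          "0:[f0<0.5] yes=1,no=2,missing=1",
          "1:leaf=0.4", "2:leaf=-0.4",
          "booster[1]",
          "0:leaf=0.1")
sum(grepl("^booster\\[[0-9]+\\]$", dump))  # 2 trees
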
@@ -3,21 +3,22 @@ require(xgboost)
context("monotone constraints")

set.seed(1024)
-x = rnorm(1000, 10)
-y = -1*x + rnorm(1000, 0.001) + 3*sin(x)
-train = matrix(x, ncol = 1)
+x <- rnorm(1000, 10)
+y <- -1 * x + rnorm(1000, 0.001) + 3 * sin(x)
+train <- matrix(x, ncol = 1)


test_that("monotone constraints for regression", {
-  bst = xgboost(data = train, label = y, max_depth = 2,
-                eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
-                monotone_constraints = -1)
-
-  pred = predict(bst, train)
-
-  ind = order(train[,1])
-  pred.ord = pred[ind]
-  expect_true({
-    !any(diff(pred.ord) > 0)
-  }, "Monotone Constraint Satisfied")
+  bst <- xgboost(data = train, label = y, max_depth = 2,
+                 eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
+                 monotone_constraints = -1)
+
+  pred <- predict(bst, train)
+
+  ind <- order(train[, 1])
+  pred.ord <- pred[ind]
+  expect_true({
+    !any(diff(pred.ord) > 0)
+  }, "Monotone Constraint Satisfied")
+
})

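With a single feature, monotone_constraints = -1 demands a non-increasing response, which is exactly what the ordered-prediction check asserts. For multi-feature data the parameter takes one entry per column; a hedged sketch with made-up data (if an older package version rejects a plain numeric vector, the documented string form '(-1,0,1)' is the fallback):

set.seed(42)
train3 <- matrix(rnorm(300), ncol = 3)
y3 <- -train3[, 1] + train3[, 3] + rnorm(100, sd = 0.1)
bst3 <- xgboost(data = train3, label = y3, max_depth = 2, eta = 0.1,
                nrounds = 50, verbose = 0,
                monotone_constraints = c(-1, 0, 1))  # decreasing, unconstrained, increasing
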
@@ -2,8 +2,8 @@ context('Test model params and call are exposed to R')

require(xgboost)

-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
+data(agaricus.train, package = 'xgboost')
+data(agaricus.test, package = 'xgboost')

dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)

@@ -5,10 +5,10 @@ set.seed(1994)

test_that("poisson regression works", {
  data(mtcars)
-  bst <- xgboost(data = as.matrix(mtcars[,-11]), label = mtcars[,11],
-                 objective = 'count:poisson', nrounds=10, verbose=0)
+  bst <- xgboost(data = as.matrix(mtcars[, -11]), label = mtcars[, 11],
+                 objective = 'count:poisson', nrounds = 10, verbose = 0)
  expect_equal(class(bst), "xgb.Booster")
  pred <- predict(bst, as.matrix(mtcars[, -11]))
  expect_equal(length(pred), 32)
-  expect_lt(sqrt(mean( (pred - mtcars[,11])^2 )), 1.2)
+  expect_lt(sqrt(mean((pred - mtcars[, 11])^2)), 1.2)
})

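The RMSE bound works because predict() on a count:poisson booster returns values on the response (count) scale rather than the log link. A hedged follow-up check, reusing bst and pred from the test above (the exp/margin identity is our reading of the Poisson log link, not something this test asserts):

all(pred > 0)  # Poisson means are strictly positive
log_margin <- predict(bst, as.matrix(mtcars[, -11]), outputmargin = TRUE)
max(abs(exp(log_margin) - pred))  # ~0 if the margin is indeed the log of the prediction
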
@@ -1,51 +0,0 @@
require(xgboost)
require(Matrix)

context('Learning to rank')

test_that('Test ranking with unweighted data', {
  X <- sparseMatrix(i = c(2, 3, 7, 9, 12, 15, 17, 18),
                    j = c(1, 1, 2, 2, 3, 3, 4, 4),
                    x = rep(1.0, 8), dims = c(20, 4))
  y <- c(0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0)
  group <- c(5, 5, 5, 5)
  dtrain <- xgb.DMatrix(X, label = y, group = group)

  params <- list(eta = 1, tree_method = 'exact', objective = 'rank:pairwise', max_depth = 1,
                 eval_metric = 'auc', eval_metric = 'aucpr')
  bst <- xgb.train(params, dtrain, nrounds = 10, watchlist = list(train = dtrain))
  # Check if the metric is monotone increasing
  expect_true(all(diff(bst$evaluation_log$train_auc) >= 0))
  expect_true(all(diff(bst$evaluation_log$train_aucpr) >= 0))
})

test_that('Test ranking with weighted data', {
  X <- sparseMatrix(i = c(2, 3, 7, 9, 12, 15, 17, 18),
                    j = c(1, 1, 2, 2, 3, 3, 4, 4),
                    x = rep(1.0, 8), dims = c(20, 4))
  y <- c(0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0)
  group <- c(5, 5, 5, 5)
  weight <- c(1.0, 2.0, 3.0, 4.0)
  dtrain <- xgb.DMatrix(X, label = y, group = group, weight = weight)

  params <- list(eta = 1, tree_method = 'exact', objective = 'rank:pairwise', max_depth = 1,
                 eval_metric = 'auc', eval_metric = 'aucpr')
  bst <- xgb.train(params, dtrain, nrounds = 10, watchlist = list(train = dtrain))
  # Check if the metric is monotone increasing
  expect_true(all(diff(bst$evaluation_log$train_auc) >= 0))
  expect_true(all(diff(bst$evaluation_log$train_aucpr) >= 0))
  for (i in 1:10) {
    pred <- predict(bst, newdata = dtrain, ntreelimit = i)
    # is_sorted[i]: is i-th group correctly sorted by the ranking predictor?
    is_sorted <- lapply(seq(1, 20, by = 5),
                        function (k) {
                          ind <- order(-pred[k:(k + 4)])
                          z <- y[ind + (k - 1)]
                          all(diff(z) <= 0) # Check if z is monotone decreasing
                        })
    # Since we give weights 1, 2, 3, 4 to the four query groups,
    # the ranking predictor will first try to correctly sort the last query group
    # before correctly sorting other groups.
    expect_true(all(diff(as.numeric(is_sorted)) >= 0))
  }
})

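A note on the group vector in the deleted tests above: it encodes query-group sizes, not ids, so c(5, 5, 5, 5) partitions the 20 rows into four queries of five documents each, and the per-group loop keys off that layout. A base-R sketch recovering row ranges from group sizes (variable names here are ours):

sizes <- c(5, 5, 5, 5)
ends <- cumsum(sizes)        # 5 10 15 20 -- last row of each query
starts <- ends - sizes + 1   # 1  6 11 16 -- first row of each query
split(seq_len(sum(sizes)), rep(seq_along(sizes), sizes))  # row indices per query
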
Some files were not shown because too many files have changed in this diff.