merge latest changes

This commit is contained in:
Hui Liu 2024-01-24 13:41:05 -08:00
commit e3e3e34cd2
442 changed files with 15095 additions and 8911 deletions

View File

@ -29,7 +29,7 @@ jobs:
run: | run: |
mkdir build mkdir build
cd build cd build
cmake .. -DGOOGLE_TEST=ON -DUSE_OPENMP=ON -DUSE_DMLC_GTEST=ON -DPLUGIN_DENSE_PARSER=ON -GNinja -DBUILD_DEPRECATED_CLI=ON cmake .. -DGOOGLE_TEST=ON -DUSE_OPENMP=ON -DUSE_DMLC_GTEST=ON -GNinja -DBUILD_DEPRECATED_CLI=ON
ninja -v ninja -v
- name: Run gtest binary - name: Run gtest binary
run: | run: |
@ -63,6 +63,45 @@ jobs:
cd build cd build
ctest --extra-verbose ctest --extra-verbose
gtest-cpu-sycl:
name: Test Google C++ unittest (CPU SYCL)
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
python-version: ["3.8"]
steps:
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
with:
submodules: 'true'
- uses: mamba-org/provision-with-micromamba@f347426e5745fe3dfc13ec5baf20496990d0281f # v14
with:
cache-downloads: true
cache-env: true
environment-name: linux_sycl_test
environment-file: tests/ci_build/conda_env/linux_sycl_test.yml
- name: Display Conda env
run: |
conda info
conda list
- name: Build and install XGBoost
shell: bash -l {0}
run: |
mkdir build
cd build
cmake .. -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON -DPLUGIN_SYCL=ON -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX
make -j$(nproc)
- name: Run gtest binary for SYCL
run: |
cd build
./testxgboost --gtest_filter=Sycl*
- name: Run gtest binary for non SYCL
run: |
cd build
./testxgboost --gtest_filter=-Sycl*
c-api-demo: c-api-demo:
name: Test installing XGBoost lib + building the C API demo name: Test installing XGBoost lib + building the C API demo
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
@ -144,11 +183,5 @@ jobs:
python -m pip install wheel setuptools cmakelint cpplint pylint python -m pip install wheel setuptools cmakelint cpplint pylint
- name: Run lint - name: Run lint
run: | run: |
python3 tests/ci_build/lint_cpp.py xgboost cpp R-package/src python3 tests/ci_build/lint_cpp.py
python3 tests/ci_build/lint_cpp.py xgboost cpp include src python-package \
--exclude_path python-package/xgboost/dmlc-core python-package/xgboost/include \
python-package/xgboost/lib python-package/xgboost/rabit \
python-package/xgboost/src
sh ./tests/ci_build/lint_cmake.sh sh ./tests/ci_build/lint_cmake.sh

View File

@ -256,6 +256,47 @@ jobs:
run: | run: |
pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_spark pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_spark
python-sycl-tests-on-ubuntu:
name: Test XGBoost Python package with SYCL on ${{ matrix.config.os }}
runs-on: ${{ matrix.config.os }}
timeout-minutes: 90
strategy:
matrix:
config:
- {os: ubuntu-latest, python-version: "3.8"}
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: mamba-org/provision-with-micromamba@f347426e5745fe3dfc13ec5baf20496990d0281f # v14
with:
cache-downloads: true
cache-env: true
environment-name: linux_sycl_test
environment-file: tests/ci_build/conda_env/linux_sycl_test.yml
- name: Display Conda env
run: |
conda info
conda list
- name: Build XGBoost on Ubuntu
run: |
mkdir build
cd build
cmake .. -DPLUGIN_SYCL=ON -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
make -j$(nproc)
- name: Install Python package
run: |
cd python-package
python --version
pip install -v .
- name: Test Python package
run: |
pytest -s -v -rxXs --durations=0 ./tests/python-sycl/
python-system-installation-on-ubuntu: python-system-installation-on-ubuntu:
name: Test XGBoost Python package System Installation on ${{ matrix.os }} name: Test XGBoost Python package System Installation on ${{ matrix.os }}
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}

View File

@ -25,7 +25,7 @@ jobs:
with: with:
submodules: 'true' submodules: 'true'
- uses: r-lib/actions/setup-r@11a22a908006c25fe054c4ef0ac0436b1de3edbe # v2.6.4 - uses: r-lib/actions/setup-r@e40ad904310fc92e96951c1b0d64f3de6cbe9e14 # v2.6.5
with: with:
r-version: ${{ matrix.config.r }} r-version: ${{ matrix.config.r }}
@ -54,7 +54,6 @@ jobs:
matrix: matrix:
config: config:
- {os: windows-latest, r: 'release', compiler: 'mingw', build: 'autotools'} - {os: windows-latest, r: 'release', compiler: 'mingw', build: 'autotools'}
- {os: windows-latest, r: '4.2.0', compiler: 'msvc', build: 'cmake'}
env: env:
R_REMOTES_NO_ERRORS_FROM_WARNINGS: true R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
RSPM: ${{ matrix.config.rspm }} RSPM: ${{ matrix.config.rspm }}
@ -64,7 +63,7 @@ jobs:
with: with:
submodules: 'true' submodules: 'true'
- uses: r-lib/actions/setup-r@11a22a908006c25fe054c4ef0ac0436b1de3edbe # v2.6.4 - uses: r-lib/actions/setup-r@e40ad904310fc92e96951c1b0d64f3de6cbe9e14 # v2.6.5
with: with:
r-version: ${{ matrix.config.r }} r-version: ${{ matrix.config.r }}

View File

@ -1,4 +1,11 @@
cmake_minimum_required(VERSION 3.18 FATAL_ERROR) cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
if(PLUGIN_SYCL)
set(CMAKE_CXX_COMPILER "g++")
set(CMAKE_C_COMPILER "gcc")
string(REPLACE " -isystem ${CONDA_PREFIX}/include" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
project(xgboost LANGUAGES CXX C VERSION 2.1.0) project(xgboost LANGUAGES CXX C VERSION 2.1.0)
include(cmake/Utils.cmake) include(cmake/Utils.cmake)
list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules") list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
@ -69,7 +76,10 @@ option(KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR "Output build artifacts in CMake binar
option(USE_CUDA "Build with GPU acceleration" OFF) option(USE_CUDA "Build with GPU acceleration" OFF)
option(USE_PER_THREAD_DEFAULT_STREAM "Build with per-thread default stream" ON) option(USE_PER_THREAD_DEFAULT_STREAM "Build with per-thread default stream" ON)
option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF) option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
# This is specifically designed for PyPI binary release and should be disabled for most of the cases.
option(USE_DLOPEN_NCCL "Whether to load nccl dynamically." OFF)
option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF) option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF)
if(USE_CUDA) if(USE_CUDA)
if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES AND NOT DEFINED ENV{CUDAARCHS}) if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES AND NOT DEFINED ENV{CUDAARCHS})
set(GPU_COMPUTE_VER "" CACHE STRING set(GPU_COMPUTE_VER "" CACHE STRING
@ -80,6 +90,7 @@ if(USE_CUDA)
unset(GPU_COMPUTE_VER CACHE) unset(GPU_COMPUTE_VER CACHE)
endif() endif()
endif() endif()
# CUDA device LTO was introduced in CMake v3.25 and requires host LTO to also be enabled but can still # CUDA device LTO was introduced in CMake v3.25 and requires host LTO to also be enabled but can still
# be explicitly disabled allowing for LTO on host only, host and device, or neither, but device-only LTO # be explicitly disabled allowing for LTO on host only, host and device, or neither, but device-only LTO
# is not a supproted configuration # is not a supproted configuration
@ -91,6 +102,8 @@ cmake_dependent_option(USE_CUDA_LTO
## HIP ## HIP
option(USE_HIP "Build with GPU acceleration" OFF) option(USE_HIP "Build with GPU acceleration" OFF)
option(USE_RCCL "Build with RCCL to enable distributed GPU support." OFF) option(USE_RCCL "Build with RCCL to enable distributed GPU support." OFF)
# This is specifically designed for PyPI binary release and should be disabled for most of the cases.
option(USE_DLOPEN_RCCL "Whether to load nccl dynamically." OFF)
option(BUILD_WITH_SHARED_RCCL "Build with shared RCCL library." OFF) option(BUILD_WITH_SHARED_RCCL "Build with shared RCCL library." OFF)
## Sanitizers ## Sanitizers
option(USE_SANITIZER "Use santizer flags" OFF) option(USE_SANITIZER "Use santizer flags" OFF)
@ -99,11 +112,10 @@ set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
"Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are "Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
address, leak, undefined and thread.") address, leak, undefined and thread.")
## Plugins ## Plugins
option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)
option(PLUGIN_RMM "Build with RAPIDS Memory Manager (RMM)" OFF) option(PLUGIN_RMM "Build with RAPIDS Memory Manager (RMM)" OFF)
option(PLUGIN_FEDERATED "Build with Federated Learning" OFF) option(PLUGIN_FEDERATED "Build with Federated Learning" OFF)
## TODO: 1. Add check if DPC++ compiler is used for building ## TODO: 1. Add check if DPC++ compiler is used for building
option(PLUGIN_UPDATER_ONEAPI "DPC++ updater" OFF) option(PLUGIN_SYCL "SYCL plugin" OFF)
option(ADD_PKGCONFIG "Add xgboost.pc into system." ON) option(ADD_PKGCONFIG "Add xgboost.pc into system." ON)
#-- Checks for building XGBoost #-- Checks for building XGBoost
@ -119,12 +131,24 @@ endif()
if(BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL)) if(BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable BUILD_WITH_SHARED_NCCL.") message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable BUILD_WITH_SHARED_NCCL.")
endif() endif()
if(USE_DLOPEN_NCCL AND (NOT USE_NCCL))
message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable USE_DLOPEN_NCCL.")
endif()
if(USE_DLOPEN_NCCL AND (NOT (CMAKE_SYSTEM_NAME STREQUAL "Linux")))
message(SEND_ERROR "`USE_DLOPEN_NCCL` supports only Linux at the moment.")
endif()
if(USE_RCCL AND NOT (USE_HIP)) if(USE_RCCL AND NOT (USE_HIP))
message(SEND_ERROR "`USE_RCCL` must be enabled with `USE_HIP` flag.") message(SEND_ERROR "`USE_RCCL` must be enabled with `USE_HIP` flag.")
endif() endif()
if(BUILD_WITH_SHARED_RCCL AND (NOT USE_RCCL)) if(BUILD_WITH_SHARED_RCCL AND (NOT USE_RCCL))
message(SEND_ERROR "Build XGBoost with -DUSE_RCCL=ON to enable BUILD_WITH_SHARED_RCCL.") message(SEND_ERROR "Build XGBoost with -DUSE_RCCL=ON to enable BUILD_WITH_SHARED_RCCL.")
endif() endif()
if(USE_DLOPEN_RCCL AND (NOT USE_RCCL))
message(SEND_ERROR "Build XGBoost with -DUSE_RCCL=ON to enable USE_DLOPEN_RCCL.")
endif()
if(USE_DLOPEN_RCCL AND (NOT (CMAKE_SYSTEM_NAME STREQUAL "Linux")))
message(SEND_ERROR "`USE_DLOPEN_RCCL` supports only Linux at the moment.")
endif()
if(JVM_BINDINGS AND R_LIB) if(JVM_BINDINGS AND R_LIB)
message(SEND_ERROR "`R_LIB' is not compatible with `JVM_BINDINGS' as they both have customized configurations.") message(SEND_ERROR "`R_LIB' is not compatible with `JVM_BINDINGS' as they both have customized configurations.")
endif() endif()
@ -185,6 +209,9 @@ endif()
if(USE_HDFS) if(USE_HDFS)
message(SEND_ERROR "The option `USE_HDFS` has been removed from XGBoost") message(SEND_ERROR "The option `USE_HDFS` has been removed from XGBoost")
endif() endif()
if(PLUGIN_DENSE_PARSER)
message(SEND_ERROR "The option `PLUGIN_DENSE_PARSER` has been removed from XGBoost.")
endif()
#-- Sanitizer #-- Sanitizer
if(USE_SANITIZER) if(USE_SANITIZER)
@ -229,7 +256,7 @@ if (USE_HIP)
find_package(rocthrust REQUIRED) find_package(rocthrust REQUIRED)
find_package(hipcub REQUIRED) find_package(hipcub REQUIRED)
set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -I${HIP_INCLUDE_DIRS} -I${HIP_INCLUDE_DIRS}/hip") set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -I${HIP_INCLUDE_DIRS}")
set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -Wunused-result -w") set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -Wunused-result -w")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__HIP_PLATFORM_AMD__") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__HIP_PLATFORM_AMD__")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -I${HIP_INCLUDE_DIRS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -I${HIP_INCLUDE_DIRS}")
@ -333,6 +360,15 @@ if(PLUGIN_RMM)
get_target_property(rmm_link_libs rmm::rmm INTERFACE_LINK_LIBRARIES) get_target_property(rmm_link_libs rmm::rmm INTERFACE_LINK_LIBRARIES)
endif() endif()
if(PLUGIN_SYCL)
set(CMAKE_CXX_LINK_EXECUTABLE
"icpx <FLAGS> <CMAKE_CXX_LINK_FLAGS> -qopenmp <LINK_FLAGS> <OBJECTS> -o <TARGET> <LINK_LIBRARIES>")
set(CMAKE_CXX_CREATE_SHARED_LIBRARY
"icpx <CMAKE_SHARED_LIBRARY_CXX_FLAGS> -qopenmp <LANGUAGE_COMPILE_FLAGS> \
<CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS> <SONAME_FLAG>,<TARGET_SONAME> \
-o <TARGET> <OBJECTS> <LINK_LIBRARIES>")
endif()
#-- library #-- library
if(BUILD_STATIC_LIB) if(BUILD_STATIC_LIB)
add_library(xgboost STATIC) add_library(xgboost STATIC)

View File

@ -10,8 +10,8 @@ The Project Management Committee(PMC) consists group of active committers that m
- Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project. - Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
* [Michael Benesty](https://github.com/pommedeterresautee) * [Michael Benesty](https://github.com/pommedeterresautee)
- Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R. - Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan), Akuity * [Yuan Tang](https://github.com/terrytangyuan), Red Hat
- Yuan is a founding engineer at Akuity. He contributed mostly in R and Python packages. - Yuan is a principal software engineer at Red Hat. He contributed mostly in R and Python packages.
* [Nan Zhu](https://github.com/CodingCat), Uber * [Nan Zhu](https://github.com/CodingCat), Uber
- Nan is a software engineer in Uber. He contributed mostly in JVM packages. - Nan is a software engineer in Uber. He contributed mostly in JVM packages.
* [Jiaming Yuan](https://github.com/trivialfis) * [Jiaming Yuan](https://github.com/trivialfis)

View File

@ -14,6 +14,15 @@ if(ENABLE_ALL_WARNINGS)
target_compile_options(xgboost-r PRIVATE -Wall -Wextra) target_compile_options(xgboost-r PRIVATE -Wall -Wextra)
endif() endif()
if(MSVC)
# https://github.com/microsoft/LightGBM/pull/6061
# MSVC doesn't work with anonymous types in structs. (R complex)
#
# syntax error: missing ';' before identifier 'private_data_c'
#
target_compile_definitions(xgboost-r PRIVATE -DR_LEGACY_RCOMPLEX)
endif()
target_compile_definitions( target_compile_definitions(
xgboost-r PUBLIC xgboost-r PUBLIC
-DXGBOOST_STRICT_R_MODE=1 -DXGBOOST_STRICT_R_MODE=1

View File

@ -58,12 +58,13 @@ Suggests:
float, float,
titanic titanic
Depends: Depends:
R (>= 3.3.0) R (>= 4.3.0)
Imports: Imports:
Matrix (>= 1.1-0), Matrix (>= 1.1-0),
methods, methods,
data.table (>= 1.9.6), data.table (>= 1.9.6),
jsonlite (>= 1.0), jsonlite (>= 1.0)
RoxygenNote: 7.2.3 Roxygen: list(markdown = TRUE)
RoxygenNote: 7.3.0
Encoding: UTF-8 Encoding: UTF-8
SystemRequirements: GNU make, C++17 SystemRequirements: GNU make, C++17

View File

@ -1,17 +1,22 @@
# Generated by roxygen2: do not edit by hand # Generated by roxygen2: do not edit by hand
S3method("[",xgb.Booster)
S3method("[",xgb.DMatrix) S3method("[",xgb.DMatrix)
S3method("dimnames<-",xgb.DMatrix) S3method("dimnames<-",xgb.DMatrix)
S3method(coef,xgb.Booster)
S3method(dim,xgb.DMatrix) S3method(dim,xgb.DMatrix)
S3method(dimnames,xgb.DMatrix) S3method(dimnames,xgb.DMatrix)
S3method(getinfo,xgb.Booster)
S3method(getinfo,xgb.DMatrix) S3method(getinfo,xgb.DMatrix)
S3method(length,xgb.Booster)
S3method(predict,xgb.Booster) S3method(predict,xgb.Booster)
S3method(predict,xgb.Booster.handle)
S3method(print,xgb.Booster) S3method(print,xgb.Booster)
S3method(print,xgb.DMatrix) S3method(print,xgb.DMatrix)
S3method(print,xgb.cv.synchronous) S3method(print,xgb.cv.synchronous)
S3method(setinfo,xgb.Booster)
S3method(setinfo,xgb.DMatrix) S3method(setinfo,xgb.DMatrix)
S3method(slice,xgb.DMatrix) S3method(slice,xgb.DMatrix)
S3method(variable.names,xgb.Booster)
export("xgb.attr<-") export("xgb.attr<-")
export("xgb.attributes<-") export("xgb.attributes<-")
export("xgb.config<-") export("xgb.config<-")
@ -26,21 +31,27 @@ export(cb.save.model)
export(getinfo) export(getinfo)
export(setinfo) export(setinfo)
export(slice) export(slice)
export(xgb.Booster.complete)
export(xgb.DMatrix) export(xgb.DMatrix)
export(xgb.DMatrix.hasinfo)
export(xgb.DMatrix.save) export(xgb.DMatrix.save)
export(xgb.attr) export(xgb.attr)
export(xgb.attributes) export(xgb.attributes)
export(xgb.config) export(xgb.config)
export(xgb.copy.Booster)
export(xgb.create.features) export(xgb.create.features)
export(xgb.cv) export(xgb.cv)
export(xgb.dump) export(xgb.dump)
export(xgb.gblinear.history) export(xgb.gblinear.history)
export(xgb.get.DMatrix.data)
export(xgb.get.DMatrix.num.non.missing)
export(xgb.get.DMatrix.qcut)
export(xgb.get.config) export(xgb.get.config)
export(xgb.get.num.boosted.rounds)
export(xgb.ggplot.deepness) export(xgb.ggplot.deepness)
export(xgb.ggplot.importance) export(xgb.ggplot.importance)
export(xgb.ggplot.shap.summary) export(xgb.ggplot.shap.summary)
export(xgb.importance) export(xgb.importance)
export(xgb.is.same.Booster)
export(xgb.load) export(xgb.load)
export(xgb.load.raw) export(xgb.load.raw)
export(xgb.model.dt.tree) export(xgb.model.dt.tree)
@ -52,13 +63,13 @@ export(xgb.plot.shap.summary)
export(xgb.plot.tree) export(xgb.plot.tree)
export(xgb.save) export(xgb.save)
export(xgb.save.raw) export(xgb.save.raw)
export(xgb.serialize)
export(xgb.set.config) export(xgb.set.config)
export(xgb.slice.Booster)
export(xgb.train) export(xgb.train)
export(xgb.unserialize)
export(xgboost) export(xgboost)
import(methods) import(methods)
importClassesFrom(Matrix,dgCMatrix) importClassesFrom(Matrix,dgCMatrix)
importClassesFrom(Matrix,dgRMatrix)
importClassesFrom(Matrix,dgeMatrix) importClassesFrom(Matrix,dgeMatrix)
importFrom(Matrix,colSums) importFrom(Matrix,colSums)
importFrom(Matrix,sparse.model.matrix) importFrom(Matrix,sparse.model.matrix)
@ -82,8 +93,11 @@ importFrom(graphics,points)
importFrom(graphics,title) importFrom(graphics,title)
importFrom(jsonlite,fromJSON) importFrom(jsonlite,fromJSON)
importFrom(jsonlite,toJSON) importFrom(jsonlite,toJSON)
importFrom(methods,new)
importFrom(stats,coef)
importFrom(stats,median) importFrom(stats,median)
importFrom(stats,predict) importFrom(stats,predict)
importFrom(stats,variable.names)
importFrom(utils,head) importFrom(utils,head)
importFrom(utils,object.size) importFrom(utils,object.size)
importFrom(utils,str) importFrom(utils,str)

View File

@ -228,7 +228,7 @@ cb.reset.parameters <- function(new_params) {
}) })
if (!is.null(env$bst)) { if (!is.null(env$bst)) {
xgb.parameters(env$bst$handle) <- pars xgb.parameters(env$bst) <- pars
} else { } else {
for (fd in env$bst_folds) for (fd in env$bst_folds)
xgb.parameters(fd$bst) <- pars xgb.parameters(fd$bst) <- pars
@ -280,7 +280,6 @@ cb.reset.parameters <- function(new_params) {
#' \code{iteration}, #' \code{iteration},
#' \code{begin_iteration}, #' \code{begin_iteration},
#' \code{end_iteration}, #' \code{end_iteration},
#' \code{num_parallel_tree}.
#' #'
#' @seealso #' @seealso
#' \code{\link{callbacks}}, #' \code{\link{callbacks}},
@ -291,7 +290,6 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
metric_name = NULL, verbose = TRUE) { metric_name = NULL, verbose = TRUE) {
# state variables # state variables
best_iteration <- -1 best_iteration <- -1
best_ntreelimit <- -1
best_score <- Inf best_score <- Inf
best_msg <- NULL best_msg <- NULL
metric_idx <- 1 metric_idx <- 1
@ -333,13 +331,13 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
if (!is.null(env$bst)) { if (!is.null(env$bst)) {
if (!inherits(env$bst, 'xgb.Booster')) if (!inherits(env$bst, 'xgb.Booster'))
stop("'bst' in the parent frame must be an 'xgb.Booster'") stop("'bst' in the parent frame must be an 'xgb.Booster'")
if (!is.null(best_score <- xgb.attr(env$bst$handle, 'best_score'))) { if (!is.null(best_score <- xgb.attr(env$bst, 'best_score'))) {
best_score <<- as.numeric(best_score) best_score <<- as.numeric(best_score)
best_iteration <<- as.numeric(xgb.attr(env$bst$handle, 'best_iteration')) + 1 best_iteration <<- as.numeric(xgb.attr(env$bst, 'best_iteration')) + 1
best_msg <<- as.numeric(xgb.attr(env$bst$handle, 'best_msg')) best_msg <<- as.numeric(xgb.attr(env$bst, 'best_msg'))
} else { } else {
xgb.attributes(env$bst$handle) <- list(best_iteration = best_iteration - 1, xgb.attributes(env$bst) <- list(best_iteration = best_iteration - 1,
best_score = best_score) best_score = best_score)
} }
} else if (is.null(env$bst_folds) || is.null(env$basket)) { } else if (is.null(env$bst_folds) || is.null(env$basket)) {
stop("Parent frame has neither 'bst' nor ('bst_folds' and 'basket')") stop("Parent frame has neither 'bst' nor ('bst_folds' and 'basket')")
@ -348,7 +346,7 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
finalizer <- function(env) { finalizer <- function(env) {
if (!is.null(env$bst)) { if (!is.null(env$bst)) {
attr_best_score <- as.numeric(xgb.attr(env$bst$handle, 'best_score')) attr_best_score <- as.numeric(xgb.attr(env$bst, 'best_score'))
if (best_score != attr_best_score) { if (best_score != attr_best_score) {
# If the difference is too big, throw an error # If the difference is too big, throw an error
if (abs(best_score - attr_best_score) >= 1e-14) { if (abs(best_score - attr_best_score) >= 1e-14) {
@ -358,12 +356,10 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
# If the difference is due to floating-point truncation, update best_score # If the difference is due to floating-point truncation, update best_score
best_score <- attr_best_score best_score <- attr_best_score
} }
env$bst$best_iteration <- best_iteration xgb.attr(env$bst, "best_iteration") <- best_iteration - 1
env$bst$best_ntreelimit <- best_ntreelimit xgb.attr(env$bst, "best_score") <- best_score
env$bst$best_score <- best_score
} else { } else {
env$basket$best_iteration <- best_iteration env$basket$best_iteration <- best_iteration
env$basket$best_ntreelimit <- best_ntreelimit
} }
} }
@ -385,14 +381,13 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
) )
best_score <<- score best_score <<- score
best_iteration <<- i best_iteration <<- i
best_ntreelimit <<- best_iteration * env$num_parallel_tree
# save the property to attributes, so they will occur in checkpoint # save the property to attributes, so they will occur in checkpoint
if (!is.null(env$bst)) { if (!is.null(env$bst)) {
xgb.attributes(env$bst) <- list( xgb.attributes(env$bst) <- list(
best_iteration = best_iteration - 1, # convert to 0-based index best_iteration = best_iteration - 1, # convert to 0-based index
best_score = best_score, best_score = best_score,
best_msg = best_msg, best_msg = best_msg
best_ntreelimit = best_ntreelimit) )
} }
} else if (i - best_iteration >= stopping_rounds) { } else if (i - best_iteration >= stopping_rounds) {
env$stop_condition <- TRUE env$stop_condition <- TRUE
@ -412,11 +407,15 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
#' @param save_period save the model to disk after every #' @param save_period save the model to disk after every
#' \code{save_period} iterations; 0 means save the model at the end. #' \code{save_period} iterations; 0 means save the model at the end.
#' @param save_name the name or path for the saved model file. #' @param save_name the name or path for the saved model file.
#'
#' Note that the format of the model being saved is determined by the file
#' extension specified here (see \link{xgb.save} for details about how it works).
#'
#' It can contain a \code{\link[base]{sprintf}} formatting specifier #' It can contain a \code{\link[base]{sprintf}} formatting specifier
#' to include the integer iteration number in the file name. #' to include the integer iteration number in the file name.
#' E.g., with \code{save_name} = 'xgboost_%04d.model', #' E.g., with \code{save_name} = 'xgboost_%04d.ubj',
#' the file saved at iteration 50 would be named "xgboost_0050.model". #' the file saved at iteration 50 would be named "xgboost_0050.ubj".
#' #' @seealso \link{xgb.save}
#' @details #' @details
#' This callback function allows to save an xgb-model file, either periodically after each \code{save_period}'s or at the end. #' This callback function allows to save an xgb-model file, either periodically after each \code{save_period}'s or at the end.
#' #'
@ -430,7 +429,7 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
#' \code{\link{callbacks}} #' \code{\link{callbacks}}
#' #'
#' @export #' @export
cb.save.model <- function(save_period = 0, save_name = "xgboost.model") { cb.save.model <- function(save_period = 0, save_name = "xgboost.ubj") {
if (save_period < 0) if (save_period < 0)
stop("'save_period' cannot be negative") stop("'save_period' cannot be negative")
@ -440,8 +439,13 @@ cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {
stop("'save_model' callback requires the 'bst' booster object in its calling frame") stop("'save_model' callback requires the 'bst' booster object in its calling frame")
if ((save_period > 0 && (env$iteration - env$begin_iteration) %% save_period == 0) || if ((save_period > 0 && (env$iteration - env$begin_iteration) %% save_period == 0) ||
(save_period == 0 && env$iteration == env$end_iteration)) (save_period == 0 && env$iteration == env$end_iteration)) {
xgb.save(env$bst, sprintf(save_name, env$iteration)) # Note: this throws a warning if the name doesn't have anything to format through 'sprintf'
suppressWarnings({
save_name <- sprintf(save_name, env$iteration)
})
xgb.save(env$bst, save_name)
}
} }
attr(callback, 'call') <- match.call() attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.save.model' attr(callback, 'name') <- 'cb.save.model'
@ -466,8 +470,6 @@ cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {
#' \code{data}, #' \code{data},
#' \code{end_iteration}, #' \code{end_iteration},
#' \code{params}, #' \code{params},
#' \code{num_parallel_tree},
#' \code{num_class}.
#' #'
#' @return #' @return
#' Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix, #' Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
@ -490,19 +492,21 @@ cb.cv.predict <- function(save_models = FALSE) {
stop("'cb.cv.predict' callback requires 'basket' and 'bst_folds' lists in its calling frame") stop("'cb.cv.predict' callback requires 'basket' and 'bst_folds' lists in its calling frame")
N <- nrow(env$data) N <- nrow(env$data)
pred <- pred <- NULL
if (env$num_class > 1) {
matrix(NA_real_, N, env$num_class)
} else {
rep(NA_real_, N)
}
iterationrange <- c(1, NVL(env$basket$best_iteration, env$end_iteration) + 1) iterationrange <- c(1, NVL(env$basket$best_iteration, env$end_iteration))
if (NVL(env$params[['booster']], '') == 'gblinear') { if (NVL(env$params[['booster']], '') == 'gblinear') {
iterationrange <- c(1, 1) # must be 0 for gblinear iterationrange <- "all"
} }
for (fd in env$bst_folds) { for (fd in env$bst_folds) {
pr <- predict(fd$bst, fd$watchlist[[2]], iterationrange = iterationrange, reshape = TRUE) pr <- predict(fd$bst, fd$watchlist[[2]], iterationrange = iterationrange, reshape = TRUE)
if (is.null(pred)) {
if (NCOL(pr) > 1L) {
pred <- matrix(NA_real_, N, ncol(pr))
} else {
pred <- matrix(NA_real_, N)
}
}
if (is.matrix(pred)) { if (is.matrix(pred)) {
pred[fd$index, ] <- pr pred[fd$index, ] <- pr
} else { } else {
@ -512,8 +516,7 @@ cb.cv.predict <- function(save_models = FALSE) {
env$basket$pred <- pred env$basket$pred <- pred
if (save_models) { if (save_models) {
env$basket$models <- lapply(env$bst_folds, function(fd) { env$basket$models <- lapply(env$bst_folds, function(fd) {
xgb.attr(fd$bst, 'niter') <- env$end_iteration - 1 return(fd$bst)
xgb.Booster.complete(xgb.handleToBooster(handle = fd$bst, raw = NULL), saveraw = TRUE)
}) })
} }
} }
@ -665,7 +668,7 @@ cb.gblinear.history <- function(sparse = FALSE) {
} else { # xgb.cv: } else { # xgb.cv:
cf <- vector("list", length(env$bst_folds)) cf <- vector("list", length(env$bst_folds))
for (i in seq_along(env$bst_folds)) { for (i in seq_along(env$bst_folds)) {
dmp <- xgb.dump(xgb.handleToBooster(handle = env$bst_folds[[i]]$bst, raw = NULL)) dmp <- xgb.dump(env$bst_folds[[i]]$bst)
cf[[i]] <- as.numeric(grep('(booster|bias|weigh)', dmp, invert = TRUE, value = TRUE)) cf[[i]] <- as.numeric(grep('(booster|bias|weigh)', dmp, invert = TRUE, value = TRUE))
if (sparse) cf[[i]] <- as(cf[[i]], "sparseVector") if (sparse) cf[[i]] <- as(cf[[i]], "sparseVector")
} }
@ -685,14 +688,19 @@ cb.gblinear.history <- function(sparse = FALSE) {
callback callback
} }
#' Extract gblinear coefficients history. #' @title Extract gblinear coefficients history.
#' #' @description A helper function to extract the matrix of linear coefficients' history
#' A helper function to extract the matrix of linear coefficients' history
#' from a gblinear model created while using the \code{cb.gblinear.history()} #' from a gblinear model created while using the \code{cb.gblinear.history()}
#' callback. #' callback.
#' @details Note that this is an R-specific function that relies on R attributes that
#' are not saved when using xgboost's own serialization functions like \link{xgb.load}
#' or \link{xgb.load.raw}.
#' #'
#' In order for a serialized model to be accepted by tgis function, one must use R
#' serializers such as \link{saveRDS}.
#' @param model either an \code{xgb.Booster} or a result of \code{xgb.cv()}, trained #' @param model either an \code{xgb.Booster} or a result of \code{xgb.cv()}, trained
#' using the \code{cb.gblinear.history()} callback. #' using the \code{cb.gblinear.history()} callback, but \bold{not} a booster
#' loaded from \link{xgb.load} or \link{xgb.load.raw}.
#' @param class_index zero-based class index to extract the coefficients for only that #' @param class_index zero-based class index to extract the coefficients for only that
#' specific class in a multinomial multiclass model. When it is NULL, all the #' specific class in a multinomial multiclass model. When it is NULL, all the
#' coefficients are returned. Has no effect in non-multiclass models. #' coefficients are returned. Has no effect in non-multiclass models.
@ -713,20 +721,18 @@ xgb.gblinear.history <- function(model, class_index = NULL) {
stop("model must be an object of either xgb.Booster or xgb.cv.synchronous class") stop("model must be an object of either xgb.Booster or xgb.cv.synchronous class")
is_cv <- inherits(model, "xgb.cv.synchronous") is_cv <- inherits(model, "xgb.cv.synchronous")
if (is.null(model[["callbacks"]]) || is.null(model$callbacks[["cb.gblinear.history"]])) if (is_cv) {
callbacks <- model$callbacks
} else {
callbacks <- attributes(model)$callbacks
}
if (is.null(callbacks) || is.null(callbacks$cb.gblinear.history))
stop("model must be trained while using the cb.gblinear.history() callback") stop("model must be trained while using the cb.gblinear.history() callback")
if (!is_cv) { if (!is_cv) {
# extract num_class & num_feat from the internal model num_class <- xgb.num_class(model)
dmp <- xgb.dump(model) num_feat <- xgb.num_feature(model)
if (length(dmp) < 2 || dmp[2] != "bias:")
stop("It does not appear to be a gblinear model")
dmp <- dmp[-c(1, 2)]
n <- which(dmp == 'weight:')
if (length(n) != 1)
stop("It does not appear to be a gblinear model")
num_class <- n - 1
num_feat <- (length(dmp) - 4) / num_class
} else { } else {
# in case of CV, the object is expected to have this info # in case of CV, the object is expected to have this info
if (model$params$booster != "gblinear") if (model$params$booster != "gblinear")
@ -742,7 +748,7 @@ xgb.gblinear.history <- function(model, class_index = NULL) {
(class_index[1] < 0 || class_index[1] >= num_class)) (class_index[1] < 0 || class_index[1] >= num_class))
stop("class_index has to be within [0,", num_class - 1, "]") stop("class_index has to be within [0,", num_class - 1, "]")
coef_path <- environment(model$callbacks$cb.gblinear.history)[["coefs"]] coef_path <- environment(callbacks$cb.gblinear.history)[["coefs"]]
if (!is.null(class_index) && num_class > 1) { if (!is.null(class_index) && num_class > 1) {
coef_path <- if (is.list(coef_path)) { coef_path <- if (is.list(coef_path)) {
lapply(coef_path, lapply(coef_path,
@ -770,7 +776,8 @@ xgb.gblinear.history <- function(model, class_index = NULL) {
if (!is.null(eval_err)) { if (!is.null(eval_err)) {
if (length(eval_res) != length(eval_err)) if (length(eval_res) != length(eval_err))
stop('eval_res & eval_err lengths mismatch') stop('eval_res & eval_err lengths mismatch')
res <- paste0(sprintf("%s:%f+%f", enames, eval_res, eval_err), collapse = '\t') # Note: UTF-8 code for plus/minus sign is U+00B1
res <- paste0(sprintf("%s:%f\U00B1%f", enames, eval_res, eval_err), collapse = '\t')
} else { } else {
res <- paste0(sprintf("%s:%f", enames, eval_res), collapse = '\t') res <- paste0(sprintf("%s:%f", enames, eval_res), collapse = '\t')
} }

View File

@ -93,6 +93,14 @@ check.booster.params <- function(params, ...) {
interaction_constraints <- sapply(params[['interaction_constraints']], function(x) paste0('[', paste(x, collapse = ','), ']')) interaction_constraints <- sapply(params[['interaction_constraints']], function(x) paste0('[', paste(x, collapse = ','), ']'))
params[['interaction_constraints']] <- paste0('[', paste(interaction_constraints, collapse = ','), ']') params[['interaction_constraints']] <- paste0('[', paste(interaction_constraints, collapse = ','), ']')
} }
# for evaluation metrics, should generate multiple entries per metric
if (NROW(params[['eval_metric']]) > 1) {
eval_metrics <- as.list(params[["eval_metric"]])
names(eval_metrics) <- rep("eval_metric", length(eval_metrics))
params_without_ev_metrics <- within(params, rm("eval_metric"))
params <- c(params_without_ev_metrics, eval_metrics)
}
return(params) return(params)
} }
@ -140,19 +148,17 @@ check.custom.eval <- function(env = parent.frame()) {
# Update a booster handle for an iteration with dtrain data # Update a booster handle for an iteration with dtrain data
xgb.iter.update <- function(booster_handle, dtrain, iter, obj) { xgb.iter.update <- function(bst, dtrain, iter, obj) {
if (!identical(class(booster_handle), "xgb.Booster.handle")) {
stop("booster_handle must be of xgb.Booster.handle class")
}
if (!inherits(dtrain, "xgb.DMatrix")) { if (!inherits(dtrain, "xgb.DMatrix")) {
stop("dtrain must be of xgb.DMatrix class") stop("dtrain must be of xgb.DMatrix class")
} }
handle <- xgb.get.handle(bst)
if (is.null(obj)) { if (is.null(obj)) {
.Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain) .Call(XGBoosterUpdateOneIter_R, handle, as.integer(iter), dtrain)
} else { } else {
pred <- predict( pred <- predict(
booster_handle, bst,
dtrain, dtrain,
outputmargin = TRUE, outputmargin = TRUE,
training = TRUE, training = TRUE,
@ -160,23 +166,24 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj) {
) )
gpair <- obj(pred, dtrain) gpair <- obj(pred, dtrain)
n_samples <- dim(dtrain)[1] n_samples <- dim(dtrain)[1]
grad <- gpair$grad
hess <- gpair$hess
msg <- paste( if ((is.matrix(grad) && dim(grad)[1] != n_samples) ||
"Since 2.1.0, the shape of the gradient and hessian is required to be ", (is.vector(grad) && length(grad) != n_samples) ||
"(n_samples, n_targets) or (n_samples, n_classes).", (is.vector(grad) != is.vector(hess))) {
sep = "" warning(paste(
) "Since 2.1.0, the shape of the gradient and hessian is required to be ",
if (is.matrix(gpair$grad) && dim(gpair$grad)[1] != n_samples) { "(n_samples, n_targets) or (n_samples, n_classes). Will reshape assuming ",
warning(msg) "column-major order.",
} sep = ""
if (is.numeric(gpair$grad) && length(gpair$grad) != n_samples) { ))
warning(msg) grad <- matrix(grad, nrow = n_samples)
hess <- matrix(hess, nrow = n_samples)
} }
gpair$grad <- matrix(gpair$grad, nrow = n_samples)
gpair$hess <- matrix(gpair$hess, nrow = n_samples)
.Call( .Call(
XGBoosterBoostOneIter_R, booster_handle, dtrain, iter, gpair$grad, gpair$hess XGBoosterTrainOneIter_R, handle, dtrain, iter, grad, hess
) )
} }
return(TRUE) return(TRUE)
@ -186,23 +193,22 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj) {
# Evaluate one iteration. # Evaluate one iteration.
# Returns a named vector of evaluation metrics # Returns a named vector of evaluation metrics
# with the names in a 'datasetname-metricname' format. # with the names in a 'datasetname-metricname' format.
xgb.iter.eval <- function(booster_handle, watchlist, iter, feval) { xgb.iter.eval <- function(bst, watchlist, iter, feval) {
if (!identical(class(booster_handle), "xgb.Booster.handle")) handle <- xgb.get.handle(bst)
stop("class of booster_handle must be xgb.Booster.handle")
if (length(watchlist) == 0) if (length(watchlist) == 0)
return(NULL) return(NULL)
evnames <- names(watchlist) evnames <- names(watchlist)
if (is.null(feval)) { if (is.null(feval)) {
msg <- .Call(XGBoosterEvalOneIter_R, booster_handle, as.integer(iter), watchlist, as.list(evnames)) msg <- .Call(XGBoosterEvalOneIter_R, handle, as.integer(iter), watchlist, as.list(evnames))
mat <- matrix(strsplit(msg, '\\s+|:')[[1]][-1], nrow = 2) mat <- matrix(strsplit(msg, '\\s+|:')[[1]][-1], nrow = 2)
res <- structure(as.numeric(mat[2, ]), names = mat[1, ]) res <- structure(as.numeric(mat[2, ]), names = mat[1, ])
} else { } else {
res <- sapply(seq_along(watchlist), function(j) { res <- sapply(seq_along(watchlist), function(j) {
w <- watchlist[[j]] w <- watchlist[[j]]
## predict using all trees ## predict using all trees
preds <- predict(booster_handle, w, outputmargin = TRUE, iterationrange = c(1, 1)) preds <- predict(bst, w, outputmargin = TRUE, iterationrange = "all")
eval_res <- feval(preds, w) eval_res <- feval(preds, w)
out <- eval_res$value out <- eval_res$value
names(out) <- paste0(evnames[j], "-", eval_res$metric) names(out) <- paste0(evnames[j], "-", eval_res$metric)
@ -343,16 +349,45 @@ xgb.createFolds <- function(y, k) {
#' @name xgboost-deprecated #' @name xgboost-deprecated
NULL NULL
#' Do not use \code{\link[base]{saveRDS}} or \code{\link[base]{save}} for long-term archival of #' @title Model Serialization and Compatibility
#' models. Instead, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}}. #' @description
#' #'
#' It is a common practice to use the built-in \code{\link[base]{saveRDS}} function (or #' When it comes to serializing XGBoost models, it's possible to use R serializers such as
#' \code{\link[base]{save}}) to persist R objects to the disk. While it is possible to persist #' \link{save} or \link{saveRDS} to serialize an XGBoost R model, but XGBoost also provides
#' \code{xgb.Booster} objects using \code{\link[base]{saveRDS}}, it is not advisable to do so if #' its own serializers with better compatibility guarantees, which allow loading
#' the model is to be accessed in the future. If you train a model with the current version of #' said models in other language bindings of XGBoost.
#' XGBoost and persist it with \code{\link[base]{saveRDS}}, the model is not guaranteed to be #'
#' accessible in later releases of XGBoost. To ensure that your model can be accessed in future #' Note that an `xgb.Booster` object, outside of its core components, might also keep:\itemize{
#' releases of XGBoost, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}} instead. #' \item Additional model configuration (accessible through \link{xgb.config}),
#' which includes model fitting parameters like `max_depth` and runtime parameters like `nthread`.
#' These are not necessarily useful for prediction/importance/plotting.
#' \item Additional R-specific attributes - e.g. results of callbacks, such as evaluation logs,
#' which are kept as a `data.table` object, accessible through `attributes(model)$evaluation_log`
#' if present.
#' }
#'
#' The first one (configurations) does not have the same compatibility guarantees as
#' the model itself, including attributes that are set and accessed through \link{xgb.attributes} - that is, such configuration
#' might be lost after loading the booster in a different XGBoost version, regardless of the
#' serializer that was used. These are saved when using \link{saveRDS}, but will be discarded
#' if loaded into an incompatible XGBoost version. They are not saved when using XGBoost's
#' serializers from its public interface including \link{xgb.save} and \link{xgb.save.raw}.
#'
#' The second ones (R attributes) are not part of the standard XGBoost model structure, and thus are
#' not saved when using XGBoost's own serializers. These attributes are only used for informational
#' purposes, such as keeping track of evaluation metrics as the model was fit, or saving the R
#' call that produced the model, but are otherwise not used for prediction / importance / plotting / etc.
#' These R attributes are only preserved when using R's serializers.
#'
#' Note that XGBoost models in R starting from version `2.1.0` and onwards, and XGBoost models
#' before version `2.1.0`; have a very different R object structure and are incompatible with
#' each other. Hence, models that were saved with R serializers live `saveRDS` or `save` before
#' version `2.1.0` will not work with latter `xgboost` versions and vice versa. Be aware that
#' the structure of R model objects could in theory change again in the future, so XGBoost's serializers
#' should be preferred for long-term storage.
#'
#' Furthermore, note that using the package `qs` for serialization will require version 0.26 or
#' higher of said package, and will have the same compatibility restrictions as R serializers.
#' #'
#' @details #' @details
#' Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into #' Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into
@ -365,26 +400,29 @@ NULL
#' The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model #' The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model
#' as part of another R object. #' as part of another R object.
#' #'
#' Note: Do not use \code{\link{xgb.serialize}} to store models long-term. It persists not only the #' Use \link{saveRDS} if you require the R-specific attributes that a booster might have, such
#' model but also internal configurations and parameters, and its format is not stable across #' as evaluation logs, but note that future compatibility of such objects is outside XGBoost's
#' multiple XGBoost versions. Use \code{\link{xgb.serialize}} only for checkpointing. #' control as it relies on R's serialization format (see e.g. the details section in
#' \link{serialize} and \link{save} from base R).
#' #'
#' For more details and explanation about model persistence and archival, consult the page #' For more details and explanation about model persistence and archival, consult the page
#' \url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}. #' \url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}.
#' #'
#' @examples #' @examples
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package='xgboost')
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, #' bst <- xgb.train(data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label),
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") #' max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
#' objective = "binary:logistic")
#' #'
#' # Save as a stand-alone file; load it with xgb.load() #' # Save as a stand-alone file; load it with xgb.load()
#' xgb.save(bst, 'xgb.model') #' fname <- file.path(tempdir(), "xgb_model.ubj")
#' bst2 <- xgb.load('xgb.model') #' xgb.save(bst, fname)
#' bst2 <- xgb.load(fname)
#' #'
#' # Save as a stand-alone file (JSON); load it with xgb.load() #' # Save as a stand-alone file (JSON); load it with xgb.load()
#' xgb.save(bst, 'xgb.model.json') #' fname <- file.path(tempdir(), "xgb_model.json")
#' bst2 <- xgb.load('xgb.model.json') #' xgb.save(bst, fname)
#' if (file.exists('xgb.model.json')) file.remove('xgb.model.json') #' bst2 <- xgb.load(fname)
#' #'
#' # Save as a raw byte vector; load it with xgb.load.raw() #' # Save as a raw byte vector; load it with xgb.load.raw()
#' xgb_bytes <- xgb.save.raw(bst) #' xgb_bytes <- xgb.save.raw(bst)
@ -395,12 +433,12 @@ NULL
#' # Persist the R object. Here, saveRDS() is okay, since it doesn't persist #' # Persist the R object. Here, saveRDS() is okay, since it doesn't persist
#' # xgb.Booster directly. What's being persisted is the future-proof byte representation #' # xgb.Booster directly. What's being persisted is the future-proof byte representation
#' # as given by xgb.save.raw(). #' # as given by xgb.save.raw().
#' saveRDS(obj, 'my_object.rds') #' fname <- file.path(tempdir(), "my_object.Rds")
#' saveRDS(obj, fname)
#' # Read back the R object #' # Read back the R object
#' obj2 <- readRDS('my_object.rds') #' obj2 <- readRDS(fname)
#' # Re-construct xgb.Booster object from the bytes #' # Re-construct xgb.Booster object from the bytes
#' bst2 <- xgb.load.raw(obj2$xgb_model_bytes) #' bst2 <- xgb.load.raw(obj2$xgb_model_bytes)
#' if (file.exists('my_object.rds')) file.remove('my_object.rds')
#' #'
#' @name a-compatibility-note-for-saveRDS-save #' @name a-compatibility-note-for-saveRDS-save
NULL NULL

File diff suppressed because it is too large Load Diff

View File

@ -5,16 +5,48 @@
#' \code{\link{xgb.DMatrix.save}}). #' \code{\link{xgb.DMatrix.save}}).
#' #'
#' @param data a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, #' @param data a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object,
#' a \code{dgRMatrix} object (only when making predictions from a fitted model), #' a \code{dgRMatrix} object,
#' a \code{dsparseVector} object (only when making predictions from a fitted model, will be #' a \code{dsparseVector} object (only when making predictions from a fitted model, will be
#' interpreted as a row vector), or a character string representing a filename. #' interpreted as a row vector), or a character string representing a filename.
#' @param info a named list of additional information to store in the \code{xgb.DMatrix} object. #' @param label Label of the training data.
#' See \code{\link{setinfo}} for the specific allowed kinds of #' @param weight Weight for each instance.
#'
#' Note that, for ranking task, weights are per-group. In ranking task, one weight
#' is assigned to each group (not each data point). This is because we
#' only care about the relative ordering of data points within each group,
#' so it doesn't make sense to assign weights to individual data points.
#' @param base_margin Base margin used for boosting from existing model.
#'
#' In the case of multi-output models, one can also pass multi-dimensional base_margin.
#' @param missing a float value to represents missing values in data (used only when input is a dense matrix). #' @param missing a float value to represents missing values in data (used only when input is a dense matrix).
#' It is useful when a 0 or some other extreme value represents missing values in data. #' It is useful when a 0 or some other extreme value represents missing values in data.
#' @param silent whether to suppress printing an informational message after loading from a file. #' @param silent whether to suppress printing an informational message after loading from a file.
#' @param feature_names Set names for features. Overrides column names in data
#' frame and matrix.
#' @param nthread Number of threads used for creating DMatrix. #' @param nthread Number of threads used for creating DMatrix.
#' @param ... the \code{info} data could be passed directly as parameters, without creating an \code{info} list. #' @param group Group size for all ranking group.
#' @param qid Query ID for data samples, used for ranking.
#' @param label_lower_bound Lower bound for survival training.
#' @param label_upper_bound Upper bound for survival training.
#' @param feature_weights Set feature weights for column sampling.
#' @param enable_categorical Experimental support of specializing for categorical features.
#'
#' If passing 'TRUE' and 'data' is a data frame,
#' columns of categorical types will automatically
#' be set to be of categorical type (feature_type='c') in the resulting DMatrix.
#'
#' If passing 'FALSE' and 'data' is a data frame with categorical columns,
#' it will result in an error being thrown.
#'
#' If 'data' is not a data frame, this argument is ignored.
#'
#' JSON/UBJSON serialization format is required for this.
#'
#' @details
#' Note that DMatrix objects are not serializable through R functions such as \code{saveRDS} or \code{save}.
#' If a DMatrix gets serialized and then de-serialized (for example, when saving data in an R session or caching
#' chunks in an Rmd file), the resulting object will not be usable anymore and will need to be reconstructed
#' from the original source of data.
#' #'
#' @examples #' @examples
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package='xgboost')
@ -24,21 +56,43 @@
#' dtrain <- with( #' dtrain <- with(
#' agaricus.train, xgb.DMatrix(data, label = label, nthread = nthread) #' agaricus.train, xgb.DMatrix(data, label = label, nthread = nthread)
#' ) #' )
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data') #' fname <- file.path(tempdir(), "xgb.DMatrix.data")
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data') #' xgb.DMatrix.save(dtrain, fname)
#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data') #' dtrain <- xgb.DMatrix(fname)
#' @export #' @export
xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, nthread = NULL, ...) { xgb.DMatrix <- function(
cnames <- NULL data,
label = NULL,
weight = NULL,
base_margin = NULL,
missing = NA,
silent = FALSE,
feature_names = colnames(data),
nthread = NULL,
group = NULL,
qid = NULL,
label_lower_bound = NULL,
label_upper_bound = NULL,
feature_weights = NULL,
enable_categorical = FALSE
) {
if (!is.null(group) && !is.null(qid)) {
stop("Either one of 'group' or 'qid' should be NULL")
}
ctypes <- NULL
if (typeof(data) == "character") { if (typeof(data) == "character") {
if (length(data) > 1) if (length(data) > 1) {
stop("'data' has class 'character' and length ", length(data), stop(
".\n 'data' accepts either a numeric matrix or a single filename.") "'data' has class 'character' and length ", length(data),
".\n 'data' accepts either a numeric matrix or a single filename."
)
}
data <- path.expand(data) data <- path.expand(data)
handle <- .Call(XGDMatrixCreateFromFile_R, data, as.integer(silent)) handle <- .Call(XGDMatrixCreateFromFile_R, data, as.integer(silent))
} else if (is.matrix(data)) { } else if (is.matrix(data)) {
handle <- .Call(XGDMatrixCreateFromMat_R, data, missing, as.integer(NVL(nthread, -1))) handle <- .Call(
cnames <- colnames(data) XGDMatrixCreateFromMat_R, data, missing, as.integer(NVL(nthread, -1))
)
} else if (inherits(data, "dgCMatrix")) { } else if (inherits(data, "dgCMatrix")) {
handle <- .Call( handle <- .Call(
XGDMatrixCreateFromCSC_R, XGDMatrixCreateFromCSC_R,
@ -49,7 +103,6 @@ xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, nthre
missing, missing,
as.integer(NVL(nthread, -1)) as.integer(NVL(nthread, -1))
) )
cnames <- colnames(data)
} else if (inherits(data, "dgRMatrix")) { } else if (inherits(data, "dgRMatrix")) {
handle <- .Call( handle <- .Call(
XGDMatrixCreateFromCSR_R, XGDMatrixCreateFromCSR_R,
@ -60,7 +113,6 @@ xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, nthre
missing, missing,
as.integer(NVL(nthread, -1)) as.integer(NVL(nthread, -1))
) )
cnames <- colnames(data)
} else if (inherits(data, "dsparseVector")) { } else if (inherits(data, "dsparseVector")) {
indptr <- c(0L, as.integer(length(data@i))) indptr <- c(0L, as.integer(length(data@i)))
ind <- as.integer(data@i) - 1L ind <- as.integer(data@i) - 1L
@ -73,23 +125,112 @@ xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, nthre
missing, missing,
as.integer(NVL(nthread, -1)) as.integer(NVL(nthread, -1))
) )
} else if (is.data.frame(data)) {
ctypes <- sapply(data, function(x) {
if (is.factor(x)) {
if (!enable_categorical) {
stop(
"When factor type is used, the parameter `enable_categorical`",
" must be set to TRUE."
)
}
"c"
} else if (is.integer(x)) {
"int"
} else if (is.logical(x)) {
"i"
} else {
if (!is.numeric(x)) {
stop("Invalid type in dataframe.")
}
"float"
}
})
## as.data.frame somehow converts integer/logical into real.
data <- as.data.frame(sapply(data, function(x) {
if (is.factor(x)) {
## XGBoost uses 0-based indexing.
as.numeric(x) - 1
} else {
x
}
}))
handle <- .Call(
XGDMatrixCreateFromDF_R, data, missing, as.integer(NVL(nthread, -1))
)
} else { } else {
stop("xgb.DMatrix does not support construction from ", typeof(data)) stop("xgb.DMatrix does not support construction from ", typeof(data))
} }
dmat <- handle dmat <- handle
attributes(dmat) <- list(class = "xgb.DMatrix") attributes(dmat) <- list(
if (!is.null(cnames)) { class = "xgb.DMatrix",
setinfo(dmat, "feature_name", cnames) fields = new.env()
)
if (!is.null(label)) {
setinfo(dmat, "label", label)
}
if (!is.null(weight)) {
setinfo(dmat, "weight", weight)
}
if (!is.null(base_margin)) {
setinfo(dmat, "base_margin", base_margin)
}
if (!is.null(feature_names)) {
setinfo(dmat, "feature_name", feature_names)
}
if (!is.null(group)) {
setinfo(dmat, "group", group)
}
if (!is.null(qid)) {
setinfo(dmat, "qid", qid)
}
if (!is.null(label_lower_bound)) {
setinfo(dmat, "label_lower_bound", label_lower_bound)
}
if (!is.null(label_upper_bound)) {
setinfo(dmat, "label_upper_bound", label_upper_bound)
}
if (!is.null(feature_weights)) {
setinfo(dmat, "feature_weights", feature_weights)
}
if (!is.null(ctypes)) {
setinfo(dmat, "feature_type", ctypes)
} }
info <- append(info, list(...))
for (i in seq_along(info)) {
p <- info[i]
setinfo(dmat, names(p), p[[1]])
}
return(dmat) return(dmat)
} }
#' @title Check whether DMatrix object has a field
#' @description Checks whether an xgb.DMatrix object has a given field assigned to
#' it, such as weights, labels, etc.
#' @param object The DMatrix object to check for the given \code{info} field.
#' @param info The field to check for presence or absence in \code{object}.
#' @seealso \link{xgb.DMatrix}, \link{getinfo.xgb.DMatrix}, \link{setinfo.xgb.DMatrix}
#' @examples
#' library(xgboost)
#' x <- matrix(1:10, nrow = 5)
#' dm <- xgb.DMatrix(x, nthread = 1)
#'
#' # 'dm' so far doesn't have any fields set
#' xgb.DMatrix.hasinfo(dm, "label")
#'
#' # Fields can be added after construction
#' setinfo(dm, "label", 1:5)
#' xgb.DMatrix.hasinfo(dm, "label")
#' @export
xgb.DMatrix.hasinfo <- function(object, info) {
if (!inherits(object, "xgb.DMatrix")) {
stop("Object is not an 'xgb.DMatrix'.")
}
if (.Call(XGCheckNullPtr_R, object)) {
warning("xgb.DMatrix object is invalid. Must be constructed again.")
return(FALSE)
}
return(NVL(attr(object, "fields")[[info]], FALSE))
}
# get dmatrix from data, label # get dmatrix from data, label
# internal helper method # internal helper method
@ -194,26 +335,38 @@ dimnames.xgb.DMatrix <- function(x) {
} }
#' Get information of an xgb.DMatrix object #' @title Get or set information of xgb.DMatrix and xgb.Booster objects
#' #' @param object Object of class \code{xgb.DMatrix} of `xgb.Booster`.
#' Get information of an xgb.DMatrix object
#' @param object Object of class \code{xgb.DMatrix}
#' @param name the name of the information field to get (see details) #' @param name the name of the information field to get (see details)
#' @param ... other parameters #' @return For `getinfo`, will return the requested field. For `setinfo`, will always return value `TRUE`
#' #' if it succeeds.
#' @details #' @details
#' The \code{name} field can be one of the following: #' The \code{name} field can be one of the following for `xgb.DMatrix`:
#' #'
#' \itemize{ #' \itemize{
#' \item \code{label}: label XGBoost learn from ; #' \item \code{label}
#' \item \code{weight}: to do a weight rescale ; #' \item \code{weight}
#' \item \code{base_margin}: base margin is the base prediction XGBoost will boost from ; #' \item \code{base_margin}
#' \item \code{nrow}: number of rows of the \code{xgb.DMatrix}. #' \item \code{label_lower_bound}
#' \item \code{label_upper_bound}
#' \item \code{group}
#' \item \code{feature_type}
#' \item \code{feature_name}
#' \item \code{nrow}
#' }
#' See the documentation for \link{xgb.DMatrix} for more information about these fields.
#' #'
#' For `xgb.Booster`, can be one of the following:
#' \itemize{
#' \item \code{feature_type}
#' \item \code{feature_name}
#' } #' }
#' #'
#' \code{group} can be setup by \code{setinfo} but can't be retrieved by \code{getinfo}. #' Note that, while 'qid' cannot be retrieved, it's possible to get the equivalent 'group'
#' for a DMatrix that had 'qid' assigned.
#' #'
#' \bold{Important}: when calling `setinfo`, the objects are modified in-place. See
#' \link{xgb.copy.Booster} for an idea of this in-place assignment works.
#' @examples #' @examples
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package='xgboost')
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2)) #' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
@ -225,49 +378,60 @@ dimnames.xgb.DMatrix <- function(x) {
#' stopifnot(all(labels2 == 1-labels)) #' stopifnot(all(labels2 == 1-labels))
#' @rdname getinfo #' @rdname getinfo
#' @export #' @export
getinfo <- function(object, ...) UseMethod("getinfo") getinfo <- function(object, name) UseMethod("getinfo")
#' @rdname getinfo #' @rdname getinfo
#' @export #' @export
getinfo.xgb.DMatrix <- function(object, name, ...) { getinfo.xgb.DMatrix <- function(object, name) {
allowed_int_fields <- 'group'
allowed_float_fields <- c(
'label', 'weight', 'base_margin',
'label_lower_bound', 'label_upper_bound'
)
allowed_str_fields <- c("feature_type", "feature_name")
allowed_fields <- c(allowed_float_fields, allowed_int_fields, allowed_str_fields, 'nrow')
if (typeof(name) != "character" || if (typeof(name) != "character" ||
length(name) != 1 || length(name) != 1 ||
!name %in% c('label', 'weight', 'base_margin', 'nrow', !name %in% allowed_fields) {
'label_lower_bound', 'label_upper_bound', "feature_type", "feature_name")) { stop("getinfo: name must be one of the following\n",
stop( paste(paste0("'", allowed_fields, "'"), collapse = ", "))
"getinfo: name must be one of the following\n",
" 'label', 'weight', 'base_margin', 'nrow', 'label_lower_bound', 'label_upper_bound', 'feature_type', 'feature_name'"
)
} }
if (name == "feature_name" || name == "feature_type") { if (name == "nrow") {
ret <- .Call(XGDMatrixGetStrFeatureInfo_R, object, name)
} else if (name != "nrow") {
ret <- .Call(XGDMatrixGetInfo_R, object, name)
} else {
ret <- nrow(object) ret <- nrow(object)
} else if (name %in% allowed_str_fields) {
ret <- .Call(XGDMatrixGetStrFeatureInfo_R, object, name)
} else if (name %in% allowed_float_fields) {
ret <- .Call(XGDMatrixGetFloatInfo_R, object, name)
if (length(ret) > nrow(object)) {
ret <- matrix(ret, nrow = nrow(object), byrow = TRUE)
}
} else if (name %in% allowed_int_fields) {
if (name == "group") {
name <- "group_ptr"
}
ret <- .Call(XGDMatrixGetUIntInfo_R, object, name)
if (length(ret) > nrow(object)) {
ret <- matrix(ret, nrow = nrow(object), byrow = TRUE)
}
} }
if (length(ret) == 0) return(NULL) if (length(ret) == 0) return(NULL)
return(ret) return(ret)
} }
#' @rdname getinfo
#' Set information of an xgb.DMatrix object
#'
#' Set information of an xgb.DMatrix object
#'
#' @param object Object of class "xgb.DMatrix"
#' @param name the name of the field to get
#' @param info the specific field of information to set #' @param info the specific field of information to set
#' @param ... other parameters
#' #'
#' @details #' @details
#' The \code{name} field can be one of the following: #' See the documentation for \link{xgb.DMatrix} for possible fields that can be set
#' (which correspond to arguments in that function).
#' #'
#' \itemize{ #' Note that the following fields are allowed in the construction of an \code{xgb.DMatrix}
#' \item \code{label}: label XGBoost learn from ; #' but \bold{aren't} allowed here:\itemize{
#' \item \code{weight}: to do a weight rescale ; #' \item data
#' \item \code{base_margin}: base margin is the base prediction XGBoost will boost from ; #' \item missing
#' \item \code{group}: number of rows in each group (to use with \code{rank:pairwise} objective). #' \item silent
#' \item nthread
#' } #' }
#' #'
#' @examples #' @examples
@ -278,52 +442,61 @@ getinfo.xgb.DMatrix <- function(object, name, ...) {
#' setinfo(dtrain, 'label', 1-labels) #' setinfo(dtrain, 'label', 1-labels)
#' labels2 <- getinfo(dtrain, 'label') #' labels2 <- getinfo(dtrain, 'label')
#' stopifnot(all.equal(labels2, 1-labels)) #' stopifnot(all.equal(labels2, 1-labels))
#' @rdname setinfo
#' @export #' @export
setinfo <- function(object, ...) UseMethod("setinfo") setinfo <- function(object, name, info) UseMethod("setinfo")
#' @rdname setinfo #' @rdname getinfo
#' @export #' @export
setinfo.xgb.DMatrix <- function(object, name, info, ...) { setinfo.xgb.DMatrix <- function(object, name, info) {
.internal.setinfo.xgb.DMatrix(object, name, info)
attr(object, "fields")[[name]] <- TRUE
return(TRUE)
}
.internal.setinfo.xgb.DMatrix <- function(object, name, info) {
if (name == "label") { if (name == "label") {
if (length(info) != nrow(object)) if (NROW(info) != nrow(object))
stop("The length of labels must equal to the number of rows in the input data") stop("The length of labels must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info)) .Call(XGDMatrixSetInfo_R, object, name, info)
return(TRUE) return(TRUE)
} }
if (name == "label_lower_bound") { if (name == "label_lower_bound") {
if (length(info) != nrow(object)) if (NROW(info) != nrow(object))
stop("The length of lower-bound labels must equal to the number of rows in the input data") stop("The length of lower-bound labels must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info)) .Call(XGDMatrixSetInfo_R, object, name, info)
return(TRUE) return(TRUE)
} }
if (name == "label_upper_bound") { if (name == "label_upper_bound") {
if (length(info) != nrow(object)) if (NROW(info) != nrow(object))
stop("The length of upper-bound labels must equal to the number of rows in the input data") stop("The length of upper-bound labels must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info)) .Call(XGDMatrixSetInfo_R, object, name, info)
return(TRUE) return(TRUE)
} }
if (name == "weight") { if (name == "weight") {
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info)) .Call(XGDMatrixSetInfo_R, object, name, info)
return(TRUE) return(TRUE)
} }
if (name == "base_margin") { if (name == "base_margin") {
# if (length(info)!=nrow(object)) .Call(XGDMatrixSetInfo_R, object, name, info)
# stop("The length of base margin must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
return(TRUE) return(TRUE)
} }
if (name == "group") { if (name == "group") {
if (sum(info) != nrow(object)) if (sum(info) != nrow(object))
stop("The sum of groups must equal to the number of rows in the input data") stop("The sum of groups must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, as.integer(info)) .Call(XGDMatrixSetInfo_R, object, name, info)
return(TRUE)
}
if (name == "qid") {
if (NROW(info) != nrow(object))
stop("The length of qid assignments must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, info)
return(TRUE) return(TRUE)
} }
if (name == "feature_weights") { if (name == "feature_weights") {
if (length(info) != ncol(object)) { if (NROW(info) != ncol(object)) {
stop("The number of feature weights must equal to the number of columns in the input data") stop("The number of feature weights must equal to the number of columns in the input data")
} }
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info)) .Call(XGDMatrixSetInfo_R, object, name, info)
return(TRUE) return(TRUE)
} }
@ -353,6 +526,111 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
stop("setinfo: unknown info name ", name) stop("setinfo: unknown info name ", name)
} }
#' @title Get Quantile Cuts from DMatrix
#' @description Get the quantile cuts (a.k.a. borders) from an `xgb.DMatrix`
#' that has been quantized for the histogram method (`tree_method="hist"`).
#'
#' These cuts are used in order to assign observations to bins - i.e. these are ordered
#' boundaries which are used to determine assignment condition `border_low < x < border_high`.
#' As such, the first and last bin will be outside of the range of the data, so as to include
#' all of the observations there.
#'
#' If a given column has 'n' bins, then there will be 'n+1' cuts / borders for that column,
#' which will be output in sorted order from lowest to highest.
#'
#' Different columns can have different numbers of bins according to their range.
#' @param dmat An `xgb.DMatrix` object, as returned by \link{xgb.DMatrix}.
#' @param output Output format for the quantile cuts. Possible options are:\itemize{
#' \item `"list"` will return the output as a list with one entry per column, where
#' each column will have a numeric vector with the cuts. The list will be named if
#' `dmat` has column names assigned to it.
#' \item `"arrays"` will return a list with entries `indptr` (base-0 indexing) and
#' `data`. Here, the cuts for column 'i' are obtained by slicing 'data' from entries
#' `indptr[i]+1` to `indptr[i+1]`.
#' }
#' @return The quantile cuts, in the format specified by parameter `output`.
#' @examples
#' library(xgboost)
#' data(mtcars)
#' y <- mtcars$mpg
#' x <- as.matrix(mtcars[, -1])
#' dm <- xgb.DMatrix(x, label = y, nthread = 1)
#'
#' # DMatrix is not quantized right away, but will be once a hist model is generated
#' model <- xgb.train(
#' data = dm,
#' params = list(
#' tree_method = "hist",
#' max_bin = 8,
#' nthread = 1
#' ),
#' nrounds = 3
#' )
#'
#' # Now can get the quantile cuts
#' xgb.get.DMatrix.qcut(dm)
#' @export
xgb.get.DMatrix.qcut <- function(dmat, output = c("list", "arrays")) { # nolint
  # Retrieve the histogram quantile cuts from a quantized xgb.DMatrix.
  # `output` controls the return format: "list" (one numeric vector per
  # column) or "arrays" (raw indptr/data pair as returned by the C API).
  stopifnot(inherits(dmat, "xgb.DMatrix"))
  output <- head(output, 1L)
  stopifnot(output %in% c("list", "arrays"))
  cuts <- .Call(XGDMatrixGetQuantileCut_R, dmat)
  if (output == "arrays") {
    return(cuts)
  }
  # Slice the flat 'data' vector into per-column vectors using the
  # base-0 'indptr' offsets.
  n_cols <- length(cuts$indptr) - 1
  per_column <- lapply(
    seq_len(n_cols),
    function(col_idx) {
      lo <- cuts$indptr[col_idx]
      hi <- cuts$indptr[col_idx + 1]
      if (hi <= lo) {
        # Column has no cuts recorded.
        return(numeric())
      }
      return(cuts$data[seq(lo + 1, hi)])
    }
  )
  # Name the entries when the DMatrix carries feature names.
  col_names <- getinfo(dmat, "feature_name")
  if (NROW(col_names)) {
    names(per_column) <- col_names
  }
  return(per_column)
}
#' @title Get Number of Non-Missing Entries in DMatrix
#' @param dmat An `xgb.DMatrix` object, as returned by \link{xgb.DMatrix}.
#' @return The number of non-missing entries in the DMatrix
#' @export
xgb.get.DMatrix.num.non.missing <- function(dmat) { # nolint
  # Validate the input type before querying the native handle.
  stopifnot(inherits(dmat, "xgb.DMatrix"))
  # Delegate the count of non-missing entries to the C API.
  n_entries <- .Call(XGDMatrixNumNonMissing_R, dmat)
  return(n_entries)
}
#' @title Get DMatrix Data
#' @param dmat An `xgb.DMatrix` object, as returned by \link{xgb.DMatrix}.
#' @return The data held in the DMatrix, as a sparse CSR matrix (class `dgRMatrix`
#' from package `Matrix`). If it had feature names, these will be added as column names
#' in the output.
#' @export
xgb.get.DMatrix.data <- function(dmat) {
  # Extract the DMatrix contents as a sparse CSR matrix (Matrix::dgRMatrix).
  stopifnot(inherits(dmat, "xgb.DMatrix"))
  csr <- .Call(XGDMatrixGetDataAsCSR_R, dmat)
  # Row count is implied by the length of the CSR row-pointer array.
  n_row <- as.integer(length(csr$indptr) - 1)
  mat <- methods::new("dgRMatrix")
  mat@p <- csr$indptr
  mat@j <- csr$indices
  mat@x <- csr$data
  mat@Dim <- as.integer(c(n_row, csr$ncols))
  # Carry feature names over as column names when present.
  col_names <- getinfo(dmat, "feature_name")
  dim_names <- list(NULL, NULL)
  if (NROW(col_names)) {
    dim_names[[2L]] <- col_names
  }
  mat@Dimnames <- dim_names
  return(mat)
}
#' Get a new DMatrix containing the specified rows of #' Get a new DMatrix containing the specified rows of
#' original xgb.DMatrix object #' original xgb.DMatrix object
@ -363,7 +641,6 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
#' @param object Object of class "xgb.DMatrix" #' @param object Object of class "xgb.DMatrix"
#' @param idxset a integer vector of indices of rows needed #' @param idxset a integer vector of indices of rows needed
#' @param colset currently not used (columns subsetting is not available) #' @param colset currently not used (columns subsetting is not available)
#' @param ... other parameters (currently not used)
#' #'
#' @examples #' @examples
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package='xgboost')
@ -377,11 +654,11 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
#' #'
#' @rdname slice.xgb.DMatrix #' @rdname slice.xgb.DMatrix
#' @export #' @export
slice <- function(object, ...) UseMethod("slice") slice <- function(object, idxset) UseMethod("slice")
#' @rdname slice.xgb.DMatrix #' @rdname slice.xgb.DMatrix
#' @export #' @export
slice.xgb.DMatrix <- function(object, idxset, ...) { slice.xgb.DMatrix <- function(object, idxset) {
if (!inherits(object, "xgb.DMatrix")) { if (!inherits(object, "xgb.DMatrix")) {
stop("object must be xgb.DMatrix") stop("object must be xgb.DMatrix")
} }
@ -431,11 +708,15 @@ slice.xgb.DMatrix <- function(object, idxset, ...) {
#' @method print xgb.DMatrix #' @method print xgb.DMatrix
#' @export #' @export
print.xgb.DMatrix <- function(x, verbose = FALSE, ...) { print.xgb.DMatrix <- function(x, verbose = FALSE, ...) {
if (.Call(XGCheckNullPtr_R, x)) {
cat("INVALID xgb.DMatrix object. Must be constructed anew.\n")
return(invisible(x))
}
cat('xgb.DMatrix dim:', nrow(x), 'x', ncol(x), ' info: ') cat('xgb.DMatrix dim:', nrow(x), 'x', ncol(x), ' info: ')
infos <- character(0) infos <- character(0)
if (length(getinfo(x, 'label')) > 0) infos <- 'label' if (xgb.DMatrix.hasinfo(x, 'label')) infos <- 'label'
if (length(getinfo(x, 'weight')) > 0) infos <- c(infos, 'weight') if (xgb.DMatrix.hasinfo(x, 'weight')) infos <- c(infos, 'weight')
if (length(getinfo(x, 'base_margin')) > 0) infos <- c(infos, 'base_margin') if (xgb.DMatrix.hasinfo(x, 'base_margin')) infos <- c(infos, 'base_margin')
if (length(infos) == 0) infos <- 'NA' if (length(infos) == 0) infos <- 'NA'
cat(infos) cat(infos)
cnames <- colnames(x) cnames <- colnames(x)

View File

@ -8,9 +8,9 @@
#' @examples #' @examples
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package='xgboost')
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2)) #' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data') #' fname <- file.path(tempdir(), "xgb.DMatrix.data")
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data') #' xgb.DMatrix.save(dtrain, fname)
#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data') #' dtrain <- xgb.DMatrix(fname)
#' @export #' @export
xgb.DMatrix.save <- function(dmatrix, fname) { xgb.DMatrix.save <- function(dmatrix, fname) {
if (typeof(fname) != "character") if (typeof(fname) != "character")

View File

@ -51,7 +51,7 @@
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2)) #' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
#' dtest <- with(agaricus.test, xgb.DMatrix(data, label = label, nthread = 2)) #' dtest <- with(agaricus.test, xgb.DMatrix(data, label = label, nthread = 2))
#' #'
#' param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic') #' param <- list(max_depth=2, eta=1, objective='binary:logistic')
#' nrounds = 4 #' nrounds = 4
#' #'
#' bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2) #' bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)

View File

@ -103,7 +103,6 @@
#' parameter or randomly generated. #' parameter or randomly generated.
#' \item \code{best_iteration} iteration number with the best evaluation metric value #' \item \code{best_iteration} iteration number with the best evaluation metric value
#' (only available with early stopping). #' (only available with early stopping).
#' \item \code{best_ntreelimit} and the \code{ntreelimit} Deprecated attributes, use \code{best_iteration} instead.
#' \item \code{pred} CV prediction values available when \code{prediction} is set. #' \item \code{pred} CV prediction values available when \code{prediction} is set.
#' It is either vector or matrix (see \code{\link{cb.cv.predict}}). #' It is either vector or matrix (see \code{\link{cb.cv.predict}}).
#' \item \code{models} a list of the CV folds' models. It is only available with the explicit #' \item \code{models} a list of the CV folds' models. It is only available with the explicit
@ -126,6 +125,9 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) { early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) {
check.deprecation(...) check.deprecation(...)
if (inherits(data, "xgb.DMatrix") && .Call(XGCheckNullPtr_R, data)) {
stop("'data' is an invalid 'xgb.DMatrix' object. Must be constructed again.")
}
params <- check.booster.params(params, ...) params <- check.booster.params(params, ...)
# TODO: should we deprecate the redundant 'metrics' parameter? # TODO: should we deprecate the redundant 'metrics' parameter?
@ -136,7 +138,7 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
check.custom.eval() check.custom.eval()
# Check the labels # Check the labels
if ((inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) || if ((inherits(data, 'xgb.DMatrix') && !xgb.DMatrix.hasinfo(data, 'label')) ||
(!inherits(data, 'xgb.DMatrix') && is.null(label))) { (!inherits(data, 'xgb.DMatrix') && is.null(label))) {
stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix") stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix")
} else if (inherits(data, 'xgb.DMatrix')) { } else if (inherits(data, 'xgb.DMatrix')) {
@ -201,13 +203,13 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
dtrain <- slice(dall, unlist(folds[-k])) dtrain <- slice(dall, unlist(folds[-k]))
else else
dtrain <- slice(dall, train_folds[[k]]) dtrain <- slice(dall, train_folds[[k]])
handle <- xgb.Booster.handle( bst <- xgb.Booster(
params = params, params = params,
cachelist = list(dtrain, dtest), cachelist = list(dtrain, dtest),
modelfile = NULL, modelfile = NULL
handle = NULL
) )
list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test = dtest), index = folds[[k]]) bst <- bst$bst
list(dtrain = dtrain, bst = bst, watchlist = list(train = dtrain, test = dtest), index = folds[[k]])
}) })
rm(dall) rm(dall)
# a "basket" to collect some results from callbacks # a "basket" to collect some results from callbacks
@ -215,7 +217,6 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
# extract parameters that can affect the relationship b/w #trees and #iterations # extract parameters that can affect the relationship b/w #trees and #iterations
num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1) # nolint num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1) # nolint
num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1) # nolint
# those are fixed for CV (no training continuation) # those are fixed for CV (no training continuation)
begin_iteration <- 1 begin_iteration <- 1
@ -228,21 +229,22 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
msg <- lapply(bst_folds, function(fd) { msg <- lapply(bst_folds, function(fd) {
xgb.iter.update( xgb.iter.update(
booster_handle = fd$bst, bst = fd$bst,
dtrain = fd$dtrain, dtrain = fd$dtrain,
iter = iteration - 1, iter = iteration - 1,
obj = obj obj = obj
) )
xgb.iter.eval( xgb.iter.eval(
booster_handle = fd$bst, bst = fd$bst,
watchlist = fd$watchlist, watchlist = fd$watchlist,
iter = iteration - 1, iter = iteration - 1,
feval = feval feval = feval
) )
}) })
msg <- simplify2array(msg) msg <- simplify2array(msg)
bst_evaluation <- rowMeans(msg) # Note: these variables might look unused here, but they are used in the callbacks
bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2) # nolint bst_evaluation <- rowMeans(msg) # nolint
bst_evaluation_err <- apply(msg, 1, sd) # nolint
for (f in cb$post_iter) f() for (f in cb$post_iter) f()
@ -263,7 +265,7 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
ret <- c(ret, basket) ret <- c(ret, basket)
class(ret) <- 'xgb.cv.synchronous' class(ret) <- 'xgb.cv.synchronous'
invisible(ret) return(invisible(ret))
} }
@ -314,7 +316,7 @@ print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
}) })
} }
for (n in c('niter', 'best_iteration', 'best_ntreelimit')) { for (n in c('niter', 'best_iteration')) {
if (is.null(x[[n]])) if (is.null(x[[n]]))
next next
cat(n, ': ', x[[n]], '\n', sep = '') cat(n, ': ', x[[n]], '\n', sep = '')

View File

@ -13,7 +13,10 @@
#' When this option is on, the model dump contains two additional values: #' When this option is on, the model dump contains two additional values:
#' gain is the approximate loss function gain we get in each split; #' gain is the approximate loss function gain we get in each split;
#' cover is the sum of second order gradient in each node. #' cover is the sum of second order gradient in each node.
#' @param dump_format either 'text' or 'json' format could be specified. #' @param dump_format either 'text', 'json', or 'dot' (graphviz) format could be specified.
#'
#' Format 'dot' for a single tree can be passed directly to packages that consume this format
#' for graph visualization, such as function [DiagrammeR::grViz()]
#' @param ... currently not used #' @param ... currently not used
#' #'
#' @return #' @return
@ -37,9 +40,13 @@
#' # print in JSON format: #' # print in JSON format:
#' cat(xgb.dump(bst, with_stats = TRUE, dump_format='json')) #' cat(xgb.dump(bst, with_stats = TRUE, dump_format='json'))
#' #'
#' # plot first tree leveraging the 'dot' format
#' if (requireNamespace('DiagrammeR', quietly = TRUE)) {
#' DiagrammeR::grViz(xgb.dump(bst, dump_format = "dot")[[1L]])
#' }
#' @export #' @export
xgb.dump <- function(model, fname = NULL, fmap = "", with_stats = FALSE, xgb.dump <- function(model, fname = NULL, fmap = "", with_stats = FALSE,
dump_format = c("text", "json"), ...) { dump_format = c("text", "json", "dot"), ...) {
check.deprecation(...) check.deprecation(...)
dump_format <- match.arg(dump_format) dump_format <- match.arg(dump_format)
if (!inherits(model, "xgb.Booster")) if (!inherits(model, "xgb.Booster"))
@ -49,9 +56,16 @@ xgb.dump <- function(model, fname = NULL, fmap = "", with_stats = FALSE,
if (!(is.null(fmap) || is.character(fmap))) if (!(is.null(fmap) || is.character(fmap)))
stop("fmap: argument must be a character string (when provided)") stop("fmap: argument must be a character string (when provided)")
model <- xgb.Booster.complete(model) model_dump <- .Call(
model_dump <- .Call(XGBoosterDumpModel_R, model$handle, NVL(fmap, "")[1], as.integer(with_stats), XGBoosterDumpModel_R,
as.character(dump_format)) xgb.get.handle(model),
NVL(fmap, "")[1],
as.integer(with_stats),
as.character(dump_format)
)
if (dump_format == "dot") {
return(sapply(model_dump, function(x) gsub("^booster\\[\\d+\\]\\n", "\\1", x)))
}
if (is.null(fname)) if (is.null(fname))
model_dump <- gsub('\t', '', model_dump, fixed = TRUE) model_dump <- gsub('\t', '', model_dump, fixed = TRUE)

View File

@ -127,22 +127,20 @@ xgb.ggplot.shap.summary <- function(data, shap_contrib = NULL, features = NULL,
p p
} }
#' Combine and melt feature values and SHAP contributions for sample #' Combine feature values and SHAP values
#' observations.
#' #'
#' Conforms to data format required for ggplot functions. #' Internal function used to combine and melt feature values and SHAP contributions
#' as required for ggplot functions related to SHAP.
#' #'
#' Internal utility function. #' @param data_list The result of `xgb.shap.data()`.
#' @param normalize Whether to standardize feature values to mean 0 and
#' standard deviation 1. This is useful for comparing multiple features on the same
#' plot. Default is \code{FALSE}.
#' #'
#' @param data_list List containing 'data' and 'shap_contrib' returned by #' @return A `data.table` containing the observation ID, the feature name, the
#' \code{xgb.shap.data()}.
#' @param normalize Whether to standardize feature values to have mean 0 and
#' standard deviation 1 (useful for comparing multiple features on the same
#' plot). Default \code{FALSE}.
#'
#' @return A data.table containing the observation ID, the feature name, the
#' feature value (normalized if specified), and the SHAP contribution value. #' feature value (normalized if specified), and the SHAP contribution value.
#' @noRd #' @noRd
#' @keywords internal
prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) { prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
data <- data_list[["data"]] data <- data_list[["data"]]
shap_contrib <- data_list[["shap_contrib"]] shap_contrib <- data_list[["shap_contrib"]]
@ -163,15 +161,16 @@ prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
p_data p_data
} }
#' Scale feature value to have mean 0, standard deviation 1 #' Scale feature values
#' #'
#' This is used to compare multiple features on the same plot. #' Internal function that scales feature values to mean 0 and standard deviation 1.
#' Internal utility function #' Useful to compare multiple features on the same plot.
#' #'
#' @param x Numeric vector #' @param x Numeric vector.
#' #'
#' @return Numeric vector with mean 0 and sd 1. #' @return Numeric vector with mean 0 and standard deviation 1.
#' @noRd #' @noRd
#' @keywords internal
normalize <- function(x) { normalize <- function(x) {
loc <- mean(x, na.rm = TRUE) loc <- mean(x, na.rm = TRUE)
scale <- stats::sd(x, na.rm = TRUE) scale <- stats::sd(x, na.rm = TRUE)

View File

@ -1,107 +1,132 @@
#' Importance of features in a model. #' Feature importance
#' #'
#' Creates a \code{data.table} of feature importances in a model. #' Creates a `data.table` of feature importances.
#' #'
#' @param feature_names character vector of feature names. If the model already #' @param feature_names Character vector used to overwrite the feature names
#' contains feature names, those would be used when \code{feature_names=NULL} (default value). #' of the model. The default is `NULL` (use original feature names).
#' Non-null \code{feature_names} could be provided to override those in the model. #' @param model Object of class `xgb.Booster`.
#' @param model object of class \code{xgb.Booster}. #' @param trees An integer vector of tree indices that should be included
#' @param trees (only for the gbtree booster) an integer vector of tree indices that should be included #' into the importance calculation (only for the "gbtree" booster).
#' into the importance calculation. If set to \code{NULL}, all trees of the model are parsed. #' The default (`NULL`) parses all trees.
#' It could be useful, e.g., in multiclass classification to get feature importances #' It could be useful, e.g., in multiclass classification to get feature importances
#' for each class separately. IMPORTANT: the tree index in xgboost models #' for each class separately. *Important*: the tree index in XGBoost models
#' is zero-based (e.g., use \code{trees = 0:4} for first 5 trees). #' is zero-based (e.g., use `trees = 0:4` for the first five trees).
#' @param data deprecated. #' @param data Deprecated.
#' @param label deprecated. #' @param label Deprecated.
#' @param target deprecated. #' @param target Deprecated.
#' #'
#' @details #' @details
#' #'
#' This function works for both linear and tree models. #' This function works for both linear and tree models.
#' #'
#' For linear models, the importance is the absolute magnitude of linear coefficients. #' For linear models, the importance is the absolute magnitude of linear coefficients.
#' For that reason, in order to obtain a meaningful ranking by importance for a linear model, #' To obtain a meaningful ranking by importance for linear models, the features need to
#' the features need to be on the same scale (which you also would want to do when using either #' be on the same scale (which is also recommended when using L1 or L2 regularization).
#' L1 or L2 regularization).
#' #'
#' @return #' @return A `data.table` with the following columns:
#' #'
#' For a tree model, a \code{data.table} with the following columns: #' For a tree model:
#' \itemize{ #' - `Features`: Names of the features used in the model.
#' \item \code{Features} names of the features used in the model; #' - `Gain`: Fractional contribution of each feature to the model based on
#' \item \code{Gain} represents fractional contribution of each feature to the model based on #' the total gain of this feature's splits. Higher percentage means higher importance.
#' the total gain of this feature's splits. Higher percentage means a more important #' - `Cover`: Metric of the number of observation related to this feature.
#' predictive feature. #' - `Frequency`: Percentage of times a feature has been used in trees.
#' \item \code{Cover} metric of the number of observation related to this feature;
#' \item \code{Frequency} percentage representing the relative number of times
#' a feature have been used in trees.
#' }
#' #'
#' A linear model's importance \code{data.table} has the following columns: #' For a linear model:
#' \itemize{ #' - `Features`: Names of the features used in the model.
#' \item \code{Features} names of the features used in the model; #' - `Weight`: Linear coefficient of this feature.
#' \item \code{Weight} the linear coefficient of this feature; #' - `Class`: Class label (only for multiclass models).
#' \item \code{Class} (only for multiclass models) class label.
#' }
#' #'
#' If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names}, #' If `feature_names` is not provided and `model` doesn't have `feature_names`,
#' index of the features will be used instead. Because the index is extracted from the model dump #' the index of the features will be used instead. Because the index is extracted from the model dump
#' (based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R). #' (based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R).
#' #'
#' @examples #' @examples
#' #'
#' # binomial classification using gbtree: #' # binomial classification using "gbtree":
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package = "xgboost")
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, #'
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") #' bst <- xgboost(
#' data = agaricus.train$data,
#' label = agaricus.train$label,
#' max_depth = 2,
#' eta = 1,
#' nthread = 2,
#' nrounds = 2,
#' objective = "binary:logistic"
#' )
#'
#' xgb.importance(model = bst) #' xgb.importance(model = bst)
#' #'
#' # binomial classification using gblinear: #' # binomial classification using "gblinear":
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, booster = "gblinear", #' bst <- xgboost(
#' eta = 0.3, nthread = 1, nrounds = 20, objective = "binary:logistic") #' data = agaricus.train$data,
#' label = agaricus.train$label,
#' booster = "gblinear",
#' eta = 0.3,
#' nthread = 1,
#' nrounds = 20,objective = "binary:logistic"
#' )
#'
#' xgb.importance(model = bst) #' xgb.importance(model = bst)
#' #'
#' # multiclass classification using gbtree: #' # multiclass classification using "gbtree":
#' nclass <- 3 #' nclass <- 3
#' nrounds <- 10 #' nrounds <- 10
#' mbst <- xgboost(data = as.matrix(iris[, -5]), label = as.numeric(iris$Species) - 1, #' mbst <- xgboost(
#' max_depth = 3, eta = 0.2, nthread = 2, nrounds = nrounds, #' data = as.matrix(iris[, -5]),
#' objective = "multi:softprob", num_class = nclass) #' label = as.numeric(iris$Species) - 1,
#' max_depth = 3,
#' eta = 0.2,
#' nthread = 2,
#' nrounds = nrounds,
#' objective = "multi:softprob",
#' num_class = nclass
#' )
#'
#' # all classes clumped together: #' # all classes clumped together:
#' xgb.importance(model = mbst) #' xgb.importance(model = mbst)
#' # inspect importances separately for each class:
#' xgb.importance(model = mbst, trees = seq(from=0, by=nclass, length.out=nrounds))
#' xgb.importance(model = mbst, trees = seq(from=1, by=nclass, length.out=nrounds))
#' xgb.importance(model = mbst, trees = seq(from=2, by=nclass, length.out=nrounds))
#' #'
#' # multiclass classification using gblinear: #' # inspect importances separately for each class:
#' mbst <- xgboost(data = scale(as.matrix(iris[, -5])), label = as.numeric(iris$Species) - 1, #' xgb.importance(
#' booster = "gblinear", eta = 0.2, nthread = 1, nrounds = 15, #' model = mbst, trees = seq(from = 0, by = nclass, length.out = nrounds)
#' objective = "multi:softprob", num_class = nclass) #' )
#' xgb.importance(
#' model = mbst, trees = seq(from = 1, by = nclass, length.out = nrounds)
#' )
#' xgb.importance(
#' model = mbst, trees = seq(from = 2, by = nclass, length.out = nrounds)
#' )
#'
#' # multiclass classification using "gblinear":
#' mbst <- xgboost(
#' data = scale(as.matrix(iris[, -5])),
#' label = as.numeric(iris$Species) - 1,
#' booster = "gblinear",
#' eta = 0.2,
#' nthread = 1,
#' nrounds = 15,
#' objective = "multi:softprob",
#' num_class = nclass
#' )
#'
#' xgb.importance(model = mbst) #' xgb.importance(model = mbst)
#' #'
#' @export #' @export
xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL, xgb.importance <- function(model = NULL, feature_names = getinfo(model, "feature_name"), trees = NULL,
data = NULL, label = NULL, target = NULL) { data = NULL, label = NULL, target = NULL) {
if (!(is.null(data) && is.null(label) && is.null(target))) if (!(is.null(data) && is.null(label) && is.null(target)))
warning("xgb.importance: parameters 'data', 'label' and 'target' are deprecated") warning("xgb.importance: parameters 'data', 'label' and 'target' are deprecated")
if (!inherits(model, "xgb.Booster"))
stop("model: must be an object of class xgb.Booster")
if (is.null(feature_names) && !is.null(model$feature_names))
feature_names <- model$feature_names
if (!(is.null(feature_names) || is.character(feature_names))) if (!(is.null(feature_names) || is.character(feature_names)))
stop("feature_names: Has to be a character vector") stop("feature_names: Has to be a character vector")
model <- xgb.Booster.complete(model) handle <- xgb.get.handle(model)
config <- jsonlite::fromJSON(xgb.config(model)) if (xgb.booster_type(model) == "gblinear") {
if (config$learner$gradient_booster$name == "gblinear") {
args <- list(importance_type = "weight", feature_names = feature_names) args <- list(importance_type = "weight", feature_names = feature_names)
results <- .Call( results <- .Call(
XGBoosterFeatureScore_R, model$handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null") XGBoosterFeatureScore_R, handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null")
) )
names(results) <- c("features", "shape", "weight") names(results) <- c("features", "shape", "weight")
if (length(results$shape) == 2) { if (length(results$shape) == 2) {
@ -122,7 +147,7 @@ xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
for (importance_type in c("weight", "total_gain", "total_cover")) { for (importance_type in c("weight", "total_gain", "total_cover")) {
args <- list(importance_type = importance_type, feature_names = feature_names, tree_idx = trees) args <- list(importance_type = importance_type, feature_names = feature_names, tree_idx = trees)
results <- .Call( results <- .Call(
XGBoosterFeatureScore_R, model$handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null") XGBoosterFeatureScore_R, handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null")
) )
names(results) <- c("features", "shape", importance_type) names(results) <- c("features", "shape", importance_type)
concatenated[ concatenated[

View File

@ -17,7 +17,7 @@
#' An object of \code{xgb.Booster} class. #' An object of \code{xgb.Booster} class.
#' #'
#' @seealso #' @seealso
#' \code{\link{xgb.save}}, \code{\link{xgb.Booster.complete}}. #' \code{\link{xgb.save}}
#' #'
#' @examples #' @examples
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package='xgboost')
@ -29,40 +29,37 @@
#' #'
#' train <- agaricus.train #' train <- agaricus.train
#' test <- agaricus.test #' test <- agaricus.test
#' bst <- xgboost( #' bst <- xgb.train(
#' data = train$data, label = train$label, max_depth = 2, eta = 1, #' data = xgb.DMatrix(train$data, label = train$label),
#' max_depth = 2,
#' eta = 1,
#' nthread = nthread, #' nthread = nthread,
#' nrounds = 2, #' nrounds = 2,
#' objective = "binary:logistic" #' objective = "binary:logistic"
#' ) #' )
#' #'
#' xgb.save(bst, 'xgb.model') #' fname <- file.path(tempdir(), "xgb.ubj")
#' bst <- xgb.load('xgb.model') #' xgb.save(bst, fname)
#' if (file.exists('xgb.model')) file.remove('xgb.model') #' bst <- xgb.load(fname)
#' @export #' @export
xgb.load <- function(modelfile) { xgb.load <- function(modelfile) {
if (is.null(modelfile)) if (is.null(modelfile))
stop("xgb.load: modelfile cannot be NULL") stop("xgb.load: modelfile cannot be NULL")
handle <- xgb.Booster.handle( bst <- xgb.Booster(
params = list(), params = list(),
cachelist = list(), cachelist = list(),
modelfile = modelfile, modelfile = modelfile
handle = NULL
) )
bst <- bst$bst
# re-use modelfile if it is raw so we do not need to serialize # re-use modelfile if it is raw so we do not need to serialize
if (typeof(modelfile) == "raw") { if (typeof(modelfile) == "raw") {
warning( warning(
paste( paste(
"The support for loading raw booster with `xgb.load` will be ", "The support for loading raw booster with `xgb.load` will be ",
"discontinued in upcoming release. Use `xgb.load.raw` or", "discontinued in upcoming release. Use `xgb.load.raw` instead. "
" `xgb.unserialize` instead. "
) )
) )
bst <- xgb.handleToBooster(handle = handle, raw = modelfile)
} else {
bst <- xgb.handleToBooster(handle = handle, raw = NULL)
} }
bst <- xgb.Booster.complete(bst, saveraw = TRUE)
return(bst) return(bst)
} }

View File

@ -3,21 +3,10 @@
#' User can generate raw memory buffer by calling xgb.save.raw #' User can generate raw memory buffer by calling xgb.save.raw
#' #'
#' @param buffer the buffer returned by xgb.save.raw #' @param buffer the buffer returned by xgb.save.raw
#' @param as_booster Return the loaded model as xgb.Booster instead of xgb.Booster.handle.
#'
#' @export #' @export
xgb.load.raw <- function(buffer, as_booster = FALSE) { xgb.load.raw <- function(buffer) {
cachelist <- list() cachelist <- list()
handle <- .Call(XGBoosterCreate_R, cachelist) bst <- .Call(XGBoosterCreate_R, cachelist)
.Call(XGBoosterLoadModelFromRaw_R, handle, buffer) .Call(XGBoosterLoadModelFromRaw_R, xgb.get.handle(bst), buffer)
class(handle) <- "xgb.Booster.handle" return(bst)
if (as_booster) {
booster <- list(handle = handle, raw = NULL)
class(booster) <- "xgb.Booster"
booster <- xgb.Booster.complete(booster, saveraw = TRUE)
return(booster)
} else {
return(handle)
}
} }

View File

@ -1,70 +1,72 @@
#' Parse a boosted tree model text dump #' Parse model text dump
#' #'
#' Parse a boosted tree model text dump into a \code{data.table} structure. #' Parse a boosted tree model text dump into a `data.table` structure.
#' #'
#' @param feature_names character vector of feature names. If the model already #' @param model Object of class `xgb.Booster`. If it contains feature names (they can be set through
#' contains feature names, those would be used when \code{feature_names=NULL} (default value). #' \link{setinfo}), they will be used in the output from this function.
#' Non-null \code{feature_names} could be provided to override those in the model. #' @param text Character vector previously generated by the function [xgb.dump()]
#' @param model object of class \code{xgb.Booster} #' (called with parameter `with_stats = TRUE`). `text` takes precedence over `model`.
#' @param text \code{character} vector previously generated by the \code{xgb.dump} #' @param trees An integer vector of tree indices that should be used.
#' function (where parameter \code{with_stats = TRUE} should have been set). #' The default (`NULL`) uses all trees.
#' \code{text} takes precedence over \code{model}. #' Useful, e.g., in multiclass classification to get only
#' @param trees an integer vector of tree indices that should be parsed. #' the trees of one class. *Important*: the tree index in XGBoost models
#' If set to \code{NULL}, all trees of the model are parsed. #' is zero-based (e.g., use `trees = 0:4` for the first five trees).
#' It could be useful, e.g., in multiclass classification to get only #' @param use_int_id A logical flag indicating whether nodes in columns "Yes", "No", and
#' the trees of one certain class. IMPORTANT: the tree index in xgboost models #' "Missing" should be represented as integers (when `TRUE`) or as "Tree-Node"
#' is zero-based (e.g., use \code{trees = 0:4} for first 5 trees). #' character strings (when `FALSE`, default).
#' @param use_int_id a logical flag indicating whether nodes in columns "Yes", "No", "Missing" should be #' @param ... Currently not used.
#' represented as integers (when FALSE) or as "Tree-Node" character strings (when FALSE).
#' @param ... currently not used.
#' #'
#' @return #' @return
#' A \code{data.table} with detailed information about model trees' nodes. #' A `data.table` with detailed information about tree nodes. It has the following columns:
#' - `Tree`: integer ID of a tree in a model (zero-based index).
#' - `Node`: integer ID of a node in a tree (zero-based index).
#' - `ID`: character identifier of a node in a model (only when `use_int_id = FALSE`).
#' - `Feature`: for a branch node, a feature ID or name (when available);
#' for a leaf node, it simply labels it as `"Leaf"`.
#' - `Split`: location of the split for a branch node (split condition is always "less than").
#' - `Yes`: ID of the next node when the split condition is met.
#' - `No`: ID of the next node when the split condition is not met.
#' - `Missing`: ID of the next node when the branch value is missing.
#' - `Gain`: either the split gain (change in loss) or the leaf value.
#' - `Cover`: metric related to the number of observations either seen by a split
#' or collected by a leaf during training.
#' #'
#' The columns of the \code{data.table} are: #' When `use_int_id = FALSE`, columns "Yes", "No", and "Missing" point to model-wide node identifiers
#' #' in the "ID" column. When `use_int_id = TRUE`, those columns point to node identifiers from
#' \itemize{
#' \item \code{Tree}: integer ID of a tree in a model (zero-based index)
#' \item \code{Node}: integer ID of a node in a tree (zero-based index)
#' \item \code{ID}: character identifier of a node in a model (only when \code{use_int_id=FALSE})
#' \item \code{Feature}: for a branch node, it's a feature id or name (when available);
#' for a leaf note, it simply labels it as \code{'Leaf'}
#' \item \code{Split}: location of the split for a branch node (split condition is always "less than")
#' \item \code{Yes}: ID of the next node when the split condition is met
#' \item \code{No}: ID of the next node when the split condition is not met
#' \item \code{Missing}: ID of the next node when branch value is missing
#' \item \code{Quality}: either the split gain (change in loss) or the leaf value
#' \item \code{Cover}: metric related to the number of observation either seen by a split
#' or collected by a leaf during training.
#' }
#'
#' When \code{use_int_id=FALSE}, columns "Yes", "No", and "Missing" point to model-wide node identifiers
#' in the "ID" column. When \code{use_int_id=TRUE}, those columns point to node identifiers from
#' the corresponding trees in the "Node" column. #' the corresponding trees in the "Node" column.
#' #'
#' @examples #' @examples
#' # Basic use: #' # Basic use:
#' #'
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package = "xgboost")
#' ## Keep the number of threads to 1 for examples #' ## Keep the number of threads to 1 for examples
#' nthread <- 1 #' nthread <- 1
#' data.table::setDTthreads(nthread) #' data.table::setDTthreads(nthread)
#' #'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, #' bst <- xgboost(
#' eta = 1, nthread = nthread, nrounds = 2,objective = "binary:logistic") #' data = agaricus.train$data,
#' #' label = agaricus.train$label,
#' (dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst)) #' max_depth = 2,
#' eta = 1,
#' nthread = nthread,
#' nrounds = 2,
#' objective = "binary:logistic"
#' )
#' #'
#' # This bst model already has feature_names stored with it, so those would be used when #' # This bst model already has feature_names stored with it, so those would be used when
#' # feature_names is not set: #' # feature_names is not set:
#' (dt <- xgb.model.dt.tree(model = bst)) #' dt <- xgb.model.dt.tree(bst)
#' #'
#' # How to match feature names of splits that are following a current 'Yes' branch: #' # How to match feature names of splits that are following a current 'Yes' branch:
#' #' merge(
#' merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)] #' dt,
#' dt[, .(ID, Y.Feature = Feature)], by.x = "Yes", by.y = "ID", all.x = TRUE
#' )[
#' order(Tree, Node)
#' ]
#' #'
#' @export #' @export
xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL, xgb.model.dt.tree <- function(model = NULL, text = NULL,
trees = NULL, use_int_id = FALSE, ...) { trees = NULL, use_int_id = FALSE, ...) {
check.deprecation(...) check.deprecation(...)
@ -74,19 +76,19 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
" (or NULL if 'model' was provided).") " (or NULL if 'model' was provided).")
} }
if (is.null(feature_names) && !is.null(model) && !is.null(model$feature_names))
feature_names <- model$feature_names
if (!(is.null(feature_names) || is.character(feature_names))) {
stop("feature_names: must be a character vector")
}
if (!(is.null(trees) || is.numeric(trees))) { if (!(is.null(trees) || is.numeric(trees))) {
stop("trees: must be a vector of integers.") stop("trees: must be a vector of integers.")
} }
feature_names <- NULL
if (inherits(model, "xgb.Booster")) {
feature_names <- xgb.feature_names(model)
}
from_text <- TRUE
if (is.null(text)) { if (is.null(text)) {
text <- xgb.dump(model = model, with_stats = TRUE) text <- xgb.dump(model = model, with_stats = TRUE)
from_text <- FALSE
} }
if (length(text) < 2 || !any(grepl('leaf=(\\d+)', text))) { if (length(text) < 2 || !any(grepl('leaf=(\\d+)', text))) {
@ -115,9 +117,26 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
td[, isLeaf := grepl("leaf", t, fixed = TRUE)] td[, isLeaf := grepl("leaf", t, fixed = TRUE)]
# parse branch lines # parse branch lines
branch_rx <- paste0("f(\\d+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),", branch_rx_nonames <- paste0("f(\\d+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),",
"gain=(", anynumber_regex, "),cover=(", anynumber_regex, ")") "gain=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
branch_cols <- c("Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover") branch_rx_w_names <- paste0("\\d+:\\[(.+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),",
"gain=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
text_has_feature_names <- FALSE
if (NROW(feature_names)) {
branch_rx <- branch_rx_w_names
text_has_feature_names <- TRUE
} else {
# Note: when passing a text dump, it might or might not have feature names,
# but that aspect is unknown from just the text attributes
branch_rx <- branch_rx_nonames
if (from_text) {
if (sum(grepl(branch_rx_w_names, text)) > sum(grepl(branch_rx_nonames, text))) {
branch_rx <- branch_rx_w_names
text_has_feature_names <- TRUE
}
}
}
branch_cols <- c("Feature", "Split", "Yes", "No", "Missing", "Gain", "Cover")
td[ td[
isLeaf == FALSE, isLeaf == FALSE,
(branch_cols) := { (branch_cols) := {
@ -127,7 +146,7 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
xtr[, 3:5] <- add.tree.id(xtr[, 3:5], Tree) xtr[, 3:5] <- add.tree.id(xtr[, 3:5], Tree)
if (length(xtr) == 0) { if (length(xtr) == 0) {
as.data.table( as.data.table(
list(Feature = "NA", Split = "NA", Yes = "NA", No = "NA", Missing = "NA", Quality = "NA", Cover = "NA") list(Feature = "NA", Split = "NA", Yes = "NA", No = "NA", Missing = "NA", Gain = "NA", Cover = "NA")
) )
} else { } else {
as.data.table(xtr) as.data.table(xtr)
@ -139,15 +158,17 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
is_stump <- function() { is_stump <- function() {
return(length(td$Feature) == 1 && is.na(td$Feature)) return(length(td$Feature) == 1 && is.na(td$Feature))
} }
if (!is.null(feature_names) && !is_stump()) { if (!text_has_feature_names) {
if (length(feature_names) <= max(as.numeric(td$Feature), na.rm = TRUE)) if (!is.null(feature_names) && !is_stump()) {
stop("feature_names has less elements than there are features used in the model") if (length(feature_names) <= max(as.numeric(td$Feature), na.rm = TRUE))
td[isLeaf == FALSE, Feature := feature_names[as.numeric(Feature) + 1]] stop("feature_names has less elements than there are features used in the model")
td[isLeaf == FALSE, Feature := feature_names[as.numeric(Feature) + 1]]
}
} }
# parse leaf lines # parse leaf lines
leaf_rx <- paste0("leaf=(", anynumber_regex, "),cover=(", anynumber_regex, ")") leaf_rx <- paste0("leaf=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
leaf_cols <- c("Feature", "Quality", "Cover") leaf_cols <- c("Feature", "Gain", "Cover")
td[ td[
isLeaf == TRUE, isLeaf == TRUE,
(leaf_cols) := { (leaf_cols) := {
@ -162,7 +183,7 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
] ]
# convert some columns to numeric # convert some columns to numeric
numeric_cols <- c("Split", "Quality", "Cover") numeric_cols <- c("Split", "Gain", "Cover")
td[, (numeric_cols) := lapply(.SD, as.numeric), .SDcols = numeric_cols] td[, (numeric_cols) := lapply(.SD, as.numeric), .SDcols = numeric_cols]
if (use_int_id) { if (use_int_id) {
int_cols <- c("Yes", "No", "Missing") int_cols <- c("Yes", "No", "Missing")

View File

@ -1,65 +1,74 @@
#' Plot model trees deepness #' Plot model tree depth
#' #'
#' Visualizes distributions related to depth of tree leafs. #' Visualizes distributions related to the depth of tree leaves.
#' \code{xgb.plot.deepness} uses base R graphics, while \code{xgb.ggplot.deepness} uses the ggplot backend. #' - `xgb.plot.deepness()` uses base R graphics, while
#' - `xgb.ggplot.deepness()` uses "ggplot2".
#' #'
#' @param model either an \code{xgb.Booster} model generated by the \code{xgb.train} function #' @param model Either an `xgb.Booster` model, or the "data.table" returned by [xgb.model.dt.tree()].
#' or a data.table result of the \code{xgb.model.dt.tree} function. #' @param which Which distribution to plot (see details).
#' @param plot (base R barplot) whether a barplot should be produced. #' @param plot Should the plot be shown? Default is `TRUE`.
#' If FALSE, only a data.table is returned. #' @param ... Other parameters passed to [graphics::barplot()] or [graphics::plot()].
#' @param which which distribution to plot (see details).
#' @param ... other parameters passed to \code{barplot} or \code{plot}.
#' #'
#' @details #' @details
#' #'
#' When \code{which="2x1"}, two distributions with respect to the leaf depth #' When `which = "2x1"`, two distributions with respect to the leaf depth
#' are plotted on top of each other: #' are plotted on top of each other:
#' \itemize{ #' 1. The distribution of the number of leaves in a tree model at a certain depth.
#' \item the distribution of the number of leafs in a tree model at a certain depth; #' 2. The distribution of the average weighted number of observations ("cover")
#' \item the distribution of average weighted number of observations ("cover") #' ending up in leaves at a certain depth.
#' ending up in leafs at certain depth.
#' }
#' Those could be helpful in determining sensible ranges of the \code{max_depth}
#' and \code{min_child_weight} parameters.
#' #'
#' When \code{which="max.depth"} or \code{which="med.depth"}, plots of either maximum or median depth #' Those could be helpful in determining sensible ranges of the `max_depth`
#' per tree with respect to tree number are created. And \code{which="med.weight"} allows to see how #' and `min_child_weight` parameters.
#'
#' When `which = "max.depth"` or `which = "med.depth"`, plots of either maximum or
#' median depth per tree with respect to the tree number are created.
#'
#' Finally, `which = "med.weight"` allows to see how
#' a tree's median absolute leaf weight changes through the iterations. #' a tree's median absolute leaf weight changes through the iterations.
#' #'
#' This function was inspired by the blog post #' These functions have been inspired by the blog post
#' \url{https://github.com/aysent/random-forest-leaf-visualization}. #' <https://github.com/aysent/random-forest-leaf-visualization>.
#' #'
#' @return #' @return
#' The return value of the two functions is as follows:
#' - `xgb.plot.deepness()`: A "data.table" (invisibly).
#' Each row corresponds to a terminal leaf in the model. It contains its information
#' about depth, cover, and weight (used in calculating predictions).
#' If `plot = TRUE`, also a plot is shown.
#' - `xgb.ggplot.deepness()`: When `which = "2x1"`, a list of two "ggplot" objects,
#' and a single "ggplot" object otherwise.
#' #'
#' Other than producing plots (when \code{plot=TRUE}), the \code{xgb.plot.deepness} function #' @seealso [xgb.train()] and [xgb.model.dt.tree()].
#' silently returns a processed data.table where each row corresponds to a terminal leaf in a tree model,
#' and contains information about leaf's depth, cover, and weight (which is used in calculating predictions).
#'
#' The \code{xgb.ggplot.deepness} silently returns either a list of two ggplot graphs when \code{which="2x1"}
#' or a single ggplot graph for the other \code{which} options.
#'
#' @seealso
#'
#' \code{\link{xgb.train}}, \code{\link{xgb.model.dt.tree}}.
#' #'
#' @examples #' @examples
#' #'
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package = "xgboost")
#' ## Keep the number of threads to 2 for examples #' ## Keep the number of threads to 2 for examples
#' nthread <- 2 #' nthread <- 2
#' data.table::setDTthreads(nthread) #' data.table::setDTthreads(nthread)
#' #'
#' ## Change max_depth to a higher number to get a more significant result #' ## Change max_depth to a higher number to get a more significant result
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 6, #' bst <- xgboost(
#' eta = 0.1, nthread = nthread, nrounds = 50, objective = "binary:logistic", #' data = agaricus.train$data,
#' subsample = 0.5, min_child_weight = 2) #' label = agaricus.train$label,
#' max_depth = 6,
#' nthread = nthread,
#' nrounds = 50,
#' objective = "binary:logistic",
#' subsample = 0.5,
#' min_child_weight = 2
#' )
#' #'
#' xgb.plot.deepness(bst) #' xgb.plot.deepness(bst)
#' xgb.ggplot.deepness(bst) #' xgb.ggplot.deepness(bst)
#' #'
#' xgb.plot.deepness(bst, which='max.depth', pch=16, col=rgb(0,0,1,0.3), cex=2) #' xgb.plot.deepness(
#' bst, which = "max.depth", pch = 16, col = rgb(0, 0, 1, 0.3), cex = 2
#' )
#' #'
#' xgb.plot.deepness(bst, which='med.weight', pch=16, col=rgb(0,0,1,0.3), cex=2) #' xgb.plot.deepness(
#' bst, which = "med.weight", pch = 16, col = rgb(0, 0, 1, 0.3), cex = 2
#' )
#' #'
#' @rdname xgb.plot.deepness #' @rdname xgb.plot.deepness
#' @export #' @export
@ -83,7 +92,7 @@ xgb.plot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med.d
stop("Model tree columns are not as expected!\n", stop("Model tree columns are not as expected!\n",
" Note that this function works only for tree models.") " Note that this function works only for tree models.")
dt_depths <- merge(get.leaf.depth(dt_tree), dt_tree[, .(ID, Cover, Weight = Quality)], by = "ID") dt_depths <- merge(get.leaf.depth(dt_tree), dt_tree[, .(ID, Cover, Weight = Gain)], by = "ID")
setkeyv(dt_depths, c("Tree", "ID")) setkeyv(dt_depths, c("Tree", "ID"))
# count by depth levels, and also calculate average cover at a depth # count by depth levels, and also calculate average cover at a depth
dt_summaries <- dt_depths[, .(.N, Cover = mean(Cover)), Depth] dt_summaries <- dt_depths[, .(.N, Cover = mean(Cover)), Depth]
@ -148,6 +157,6 @@ get.leaf.depth <- function(dt_tree) {
# They are mainly column names inferred by Data.table... # They are mainly column names inferred by Data.table...
globalVariables( globalVariables(
c( c(
".N", "N", "Depth", "Quality", "Cover", "Tree", "ID", "Yes", "No", "Feature", "Leaf", "Weight" ".N", "N", "Depth", "Gain", "Cover", "Tree", "ID", "Yes", "No", "Feature", "Leaf", "Weight"
) )
) )

View File

@ -1,64 +1,75 @@
#' Plot feature importance as a bar graph #' Plot feature importance
#' #'
#' Represents previously calculated feature importance as a bar graph. #' Represents previously calculated feature importance as a bar graph.
#' \code{xgb.plot.importance} uses base R graphics, while \code{xgb.ggplot.importance} uses the ggplot backend. #' - `xgb.plot.importance()` uses base R graphics, while
#' - `xgb.ggplot.importance()` uses "ggplot".
#' #'
#' @param importance_matrix a \code{data.table} returned by \code{\link{xgb.importance}}. #' @param importance_matrix A `data.table` as returned by [xgb.importance()].
#' @param top_n maximal number of top features to include into the plot. #' @param top_n Maximal number of top features to include into the plot.
#' @param measure the name of importance measure to plot. #' @param measure The name of importance measure to plot.
#' When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear. #' When `NULL`, 'Gain' would be used for trees and 'Weight' would be used for gblinear.
#' @param rel_to_first whether importance values should be represented as relative to the highest ranked feature. #' @param rel_to_first Whether importance values should be represented as relative to
#' See Details. #' the highest ranked feature, see Details.
#' @param left_margin (base R barplot) allows to adjust the left margin size to fit feature names. #' @param left_margin Adjust the left margin size to fit feature names.
#' When it is NULL, the existing \code{par('mar')} is used. #' When `NULL`, the existing `par("mar")` is used.
#' @param cex (base R barplot) passed as \code{cex.names} parameter to \code{barplot}. #' @param cex Passed as `cex.names` parameter to [graphics::barplot()].
#' @param plot (base R barplot) whether a barplot should be produced. #' @param plot Should the barplot be shown? Default is `TRUE`.
#' If FALSE, only a data.table is returned. #' @param n_clusters A numeric vector containing the min and the max range
#' @param n_clusters (ggplot only) a \code{numeric} vector containing the min and the max range
#' of the possible number of clusters of bars. #' of the possible number of clusters of bars.
#' @param ... other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las). #' @param ... Other parameters passed to [graphics::barplot()]
#' (except `horiz`, `border`, `cex.names`, `names.arg`, and `las`).
#' Only used in `xgb.plot.importance()`.
#' #'
#' @details #' @details
#' The graph represents each feature as a horizontal bar of length proportional to the importance of a feature. #' The graph represents each feature as a horizontal bar of length proportional to the importance of a feature.
#' Features are shown ranked in a decreasing importance order. #' Features are sorted by decreasing importance.
#' It works for importances from both \code{gblinear} and \code{gbtree} models. #' It works for both "gblinear" and "gbtree" models.
#' #'
#' When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}. #' When `rel_to_first = FALSE`, the values would be plotted as in `importance_matrix`.
#' For gbtree model, that would mean being normalized to the total of 1 #' For a "gbtree" model, that would mean being normalized to the total of 1
#' ("what is feature's importance contribution relative to the whole model?"). #' ("what is feature's importance contribution relative to the whole model?").
#' For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients. #' For linear models, `rel_to_first = FALSE` would show actual values of the coefficients.
#' Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of #' Setting `rel_to_first = TRUE` allows to see the picture from the perspective of
#' "what is feature's importance contribution relative to the most important feature?" #' "what is feature's importance contribution relative to the most important feature?"
#' #'
#' The ggplot-backend method also performs 1-D clustering of the importance values, #' The "ggplot" backend performs 1-D clustering of the importance values,
#' with bar colors corresponding to different clusters that have somewhat similar importance values. #' with bar colors corresponding to different clusters having similar importance values.
#' #'
#' @return #' @return
#' The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE}) #' The return value depends on the function:
#' and silently returns a processed data.table with \code{n_top} features sorted by importance. #' - `xgb.plot.importance()`: Invisibly, a "data.table" with `n_top` features sorted
#' by importance. If `plot = TRUE`, the values are also plotted as barplot.
#' - `xgb.ggplot.importance()`: A customizable "ggplot" object.
#' E.g., to change the title, set `+ ggtitle("A GRAPH NAME")`.
#' #'
#' The \code{xgb.ggplot.importance} function returns a ggplot graph which could be customized afterwards. #' @seealso [graphics::barplot()]
#' E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result.
#'
#' @seealso
#' \code{\link[graphics]{barplot}}.
#' #'
#' @examples #' @examples
#' data(agaricus.train) #' data(agaricus.train)
#'
#' ## Keep the number of threads to 2 for examples #' ## Keep the number of threads to 2 for examples
#' nthread <- 2 #' nthread <- 2
#' data.table::setDTthreads(nthread) #' data.table::setDTthreads(nthread)
#' #'
#' bst <- xgboost( #' bst <- xgboost(
#' data = agaricus.train$data, label = agaricus.train$label, max_depth = 3, #' data = agaricus.train$data,
#' eta = 1, nthread = nthread, nrounds = 2, objective = "binary:logistic" #' label = agaricus.train$label,
#' max_depth = 3,
#' eta = 1,
#' nthread = nthread,
#' nrounds = 2,
#' objective = "binary:logistic"
#' ) #' )
#' #'
#' importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst) #' importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst)
#' xgb.plot.importance(
#' importance_matrix, rel_to_first = TRUE, xlab = "Relative importance"
#' )
#' #'
#' xgb.plot.importance(importance_matrix, rel_to_first = TRUE, xlab = "Relative importance") #' gg <- xgb.ggplot.importance(
#' #' importance_matrix, measure = "Frequency", rel_to_first = TRUE
#' (gg <- xgb.ggplot.importance(importance_matrix, measure = "Frequency", rel_to_first = TRUE)) #' )
#' gg
#' gg + ggplot2::ylab("Frequency") #' gg + ggplot2::ylab("Frequency")
#' #'
#' @rdname xgb.plot.importance #' @rdname xgb.plot.importance

View File

@ -1,14 +1,10 @@
#' Project all trees on one tree and plot it #' Project all trees on one tree
#' #'
#' Visualization of the ensemble of trees as a single collective unit. #' Visualization of the ensemble of trees as a single collective unit.
#' #'
#' @param model produced by the \code{xgb.train} function. #' @inheritParams xgb.plot.tree
#' @param feature_names names of each feature as a \code{character} vector. #' @param features_keep Number of features to keep in each position of the multi trees,
#' @param features_keep number of features to keep in each position of the multi trees. #' by default 5.
#' @param plot_width width in pixels of the graph to produce
#' @param plot_height height in pixels of the graph to produce
#' @param render a logical flag for whether the graph should be rendered (see Value).
#' @param ... currently not used
#' #'
#' @details #' @details
#' #'
@ -24,33 +20,31 @@
#' Moreover, the trees tend to reuse the same features. #' Moreover, the trees tend to reuse the same features.
#' #'
#' The function projects each tree onto one, and keeps for each position the #' The function projects each tree onto one, and keeps for each position the
#' \code{features_keep} first features (based on the Gain per feature measure). #' `features_keep` first features (based on the Gain per feature measure).
#' #'
#' This function is inspired by this blog post: #' This function is inspired by this blog post:
#' \url{https://wellecks.wordpress.com/2015/02/21/peering-into-the-black-box-visualizing-lambdamart/} #' <https://wellecks.wordpress.com/2015/02/21/peering-into-the-black-box-visualizing-lambdamart/>
#' #'
#' @return #' @inherit xgb.plot.tree return
#'
#' When \code{render = TRUE}:
#' returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}.
#' Similar to ggplot objects, it needs to be printed to see it when not running from command line.
#'
#' When \code{render = FALSE}:
#' silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}.
#' This could be useful if one wants to modify some of the graph attributes
#' before rendering the graph with \code{\link[DiagrammeR]{render_graph}}.
#' #'
#' @examples #' @examples
#' #'
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package = "xgboost")
#'
#' ## Keep the number of threads to 2 for examples #' ## Keep the number of threads to 2 for examples
#' nthread <- 2 #' nthread <- 2
#' data.table::setDTthreads(nthread) #' data.table::setDTthreads(nthread)
#' #'
#' bst <- xgboost( #' bst <- xgboost(
#' data = agaricus.train$data, label = agaricus.train$label, max_depth = 15, #' data = agaricus.train$data,
#' eta = 1, nthread = nthread, nrounds = 30, objective = "binary:logistic", #' label = agaricus.train$label,
#' min_child_weight = 50, verbose = 0 #' max_depth = 15,
#' eta = 1,
#' nthread = nthread,
#' nrounds = 30,
#' objective = "binary:logistic",
#' min_child_weight = 50,
#' verbose = 0
#' ) #' )
#' #'
#' p <- xgb.plot.multi.trees(model = bst, features_keep = 3) #' p <- xgb.plot.multi.trees(model = bst, features_keep = 3)
@ -58,20 +52,23 @@
#' #'
#' \dontrun{ #' \dontrun{
#' # Below is an example of how to save this plot to a file. #' # Below is an example of how to save this plot to a file.
#' # Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed. #' # Note that for export_graph() to work, the {DiagrammeRsvg} and {rsvg} packages
#' # must also be installed.
#'
#' library(DiagrammeR) #' library(DiagrammeR)
#' gr <- xgb.plot.multi.trees(model=bst, features_keep = 3, render=FALSE) #'
#' export_graph(gr, 'tree.pdf', width=1500, height=600) #' gr <- xgb.plot.multi.trees(model = bst, features_keep = 3, render = FALSE)
#' export_graph(gr, "tree.pdf", width = 1500, height = 600)
#' } #' }
#' #'
#' @export #' @export
xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5, plot_width = NULL, plot_height = NULL, xgb.plot.multi.trees <- function(model, features_keep = 5, plot_width = NULL, plot_height = NULL,
render = TRUE, ...) { render = TRUE, ...) {
if (!requireNamespace("DiagrammeR", quietly = TRUE)) { if (!requireNamespace("DiagrammeR", quietly = TRUE)) {
stop("DiagrammeR is required for xgb.plot.multi.trees") stop("DiagrammeR is required for xgb.plot.multi.trees")
} }
check.deprecation(...) check.deprecation(...)
tree.matrix <- xgb.model.dt.tree(feature_names = feature_names, model = model) tree.matrix <- xgb.model.dt.tree(model = model)
# first number of the path represents the tree, then the following numbers are related to the path to follow # first number of the path represents the tree, then the following numbers are related to the path to follow
# root init # root init
@ -98,13 +95,13 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5,
data.table::set(tree.matrix, j = nm, value = sub("^\\d+-", "", tree.matrix[[nm]])) data.table::set(tree.matrix, j = nm, value = sub("^\\d+-", "", tree.matrix[[nm]]))
nodes.dt <- tree.matrix[ nodes.dt <- tree.matrix[
, .(Quality = sum(Quality)) , .(Gain = sum(Gain))
, by = .(abs.node.position, Feature) , by = .(abs.node.position, Feature)
][, .(Text = paste0( ][, .(Text = paste0(
paste0( paste0(
Feature[seq_len(min(length(Feature), features_keep))], Feature[seq_len(min(length(Feature), features_keep))],
" (", " (",
format(Quality[seq_len(min(length(Quality), features_keep))], digits = 5), format(Gain[seq_len(min(length(Gain), features_keep))], digits = 5),
")" ")"
), ),
collapse = "\n" collapse = "\n"

View File

@ -1,110 +1,165 @@
#' SHAP contribution dependency plots #' SHAP dependence plots
#' #'
#' Visualizing the SHAP feature contribution to prediction dependencies on feature value. #' Visualizes SHAP values against feature values to gain an impression of feature effects.
#' #'
#' @param data data as a \code{matrix} or \code{dgCMatrix}. #' @param data The data to explain as a `matrix` or `dgCMatrix`.
#' @param shap_contrib a matrix of SHAP contributions that was computed earlier for the above #' @param shap_contrib Matrix of SHAP contributions of `data`.
#' \code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}. #' The default (`NULL`) computes it from `model` and `data`.
#' @param features a vector of either column indices or of feature names to plot. When it is NULL, #' @param features Vector of column indices or feature names to plot.
#' feature importance is calculated, and \code{top_n} high ranked features are taken. #' When `NULL` (default), the `top_n` most important features are selected
#' @param top_n when \code{features} is NULL, top_n [1, 100] most important features in a model are taken. #' by [xgb.importance()].
#' @param model an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib} #' @param top_n How many of the most important features (<= 100) should be selected?
#' or \code{features} is missing. #' By default 1 for SHAP dependence and 10 for SHAP summary).
#' @param trees passed to \code{\link{xgb.importance}} when \code{features = NULL}. #' Only used when `features = NULL`.
#' @param target_class is only relevant for multiclass models. When it is set to a 0-based class index, #' @param model An `xgb.Booster` model. Only required when `shap_contrib = NULL` or
#' only SHAP contributions for that specific class are used. #' `features = NULL`.
#' If it is not set, SHAP importances are averaged over all classes. #' @param trees Passed to [xgb.importance()] when `features = NULL`.
#' @param approxcontrib passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}. #' @param target_class Only relevant for multiclass models. The default (`NULL`)
#' @param subsample a random fraction of data points to use for plotting. When it is NULL, #' averages the SHAP values over all classes. Pass a (0-based) class index
#' it is set so that up to 100K data points are used. #' to show only SHAP values of that class.
#' @param n_col a number of columns in a grid of plots. #' @param approxcontrib Passed to `predict()` when `shap_contrib = NULL`.
#' @param col color of the scatterplot markers. #' @param subsample Fraction of data points randomly picked for plotting.
#' @param pch scatterplot marker. #' The default (`NULL`) will use up to 100k data points.
#' @param discrete_n_uniq a maximal number of unique values in a feature to consider it as discrete. #' @param n_col Number of columns in a grid of plots.
#' @param discrete_jitter an \code{amount} parameter of jitter added to discrete features' positions. #' @param col Color of the scatterplot markers.
#' @param ylab a y-axis label in 1D plots. #' @param pch Scatterplot marker.
#' @param plot_NA whether the contributions of cases with missing values should also be plotted. #' @param discrete_n_uniq Maximal number of unique feature values to consider the
#' @param col_NA a color of marker for missing value contributions. #' feature as discrete.
#' @param pch_NA a marker type for NA values. #' @param discrete_jitter Jitter amount added to the values of discrete features.
#' @param pos_NA a relative position of the x-location where NA values are shown: #' @param ylab The y-axis label in 1D plots.
#' \code{min(x) + (max(x) - min(x)) * pos_NA}. #' @param plot_NA Should contributions of cases with missing values be plotted?
#' @param plot_loess whether to plot loess-smoothed curves. The smoothing is only done for features with #' Default is `TRUE`.
#' more than 5 distinct values. #' @param col_NA Color of marker for missing value contributions.
#' @param col_loess a color to use for the loess curves. #' @param pch_NA Marker type for `NA` values.
#' @param span_loess the \code{span} parameter in \code{\link[stats]{loess}}'s call. #' @param pos_NA Relative position of the x-location where `NA` values are shown:
#' @param which whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far. #' `min(x) + (max(x) - min(x)) * pos_NA`.
#' @param plot whether a plot should be drawn. If FALSE, only a list of matrices is returned. #' @param plot_loess Should loess-smoothed curves be plotted? (Default is `TRUE`).
#' @param ... other parameters passed to \code{plot}. #' The smoothing is only done for features with more than 5 distinct values.
#' @param col_loess Color of loess curves.
#' @param span_loess The `span` parameter of [stats::loess()].
#' @param which Whether to do univariate or bivariate plotting. Currently, only "1d" is implemented.
#' @param plot Should the plot be drawn? (Default is `TRUE`).
#' If `FALSE`, only a list of matrices is returned.
#' @param ... Other parameters passed to [graphics::plot()].
#' #'
#' @details #' @details
#' #'
#' These scatterplots represent how SHAP feature contributions depend of feature values. #' These scatterplots represent how SHAP feature contributions depend of feature values.
#' The similarity to partial dependency plots is that they also give an idea for how feature values #' The similarity to partial dependence plots is that they also give an idea for how feature values
#' affect predictions. However, in partial dependency plots, we usually see marginal dependencies #' affect predictions. However, in partial dependence plots, we see marginal dependencies
#' of model prediction on feature value, while SHAP contribution dependency plots display the estimated #' of model prediction on feature value, while SHAP dependence plots display the estimated
#' contributions of a feature to model prediction for each individual case. #' contributions of a feature to the prediction for each individual case.
#' #'
#' When \code{plot_loess = TRUE} is set, feature values are rounded to 3 significant digits and #' When `plot_loess = TRUE`, feature values are rounded to three significant digits and
#' weighted LOESS is computed and plotted, where weights are the numbers of data points #' weighted LOESS is computed and plotted, where the weights are the numbers of data points
#' at each rounded value. #' at each rounded value.
#' #'
#' Note: SHAP contributions are shown on the scale of model margin. E.g., for a logistic binomial objective, #' Note: SHAP contributions are on the scale of the model margin.
#' the margin is prediction before a sigmoidal transform into probability-like values. #' E.g., for a logistic binomial objective, the margin is on log-odds scale.
#' Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP #' Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP
#' contributions for all features + bias), depending on the objective used, transforming SHAP #' contributions for all features + bias), depending on the objective used, transforming SHAP
#' contributions for a feature from the marginal to the prediction space is not necessarily #' contributions for a feature from the marginal to the prediction space is not necessarily
#' a meaningful thing to do. #' a meaningful thing to do.
#' #'
#' @return #' @return
#' #' In addition to producing plots (when `plot = TRUE`), it silently returns a list of two matrices:
#' In addition to producing plots (when \code{plot=TRUE}), it silently returns a list of two matrices: #' - `data`: Feature value matrix.
#' \itemize{ #' - `shap_contrib`: Corresponding SHAP value matrix.
#' \item \code{data} the values of selected features;
#' \item \code{shap_contrib} the contributions of selected features.
#' }
#' #'
#' @references #' @references
#' #' 1. Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions",
#' Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874} #' NIPS Proceedings 2017, <https://arxiv.org/abs/1705.07874>
#' #' 2. Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles",
#' Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060} #' <https://arxiv.org/abs/1706.06060>
#' #'
#' @examples #' @examples
#' #'
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package = "xgboost")
#' data(agaricus.test, package='xgboost') #' data(agaricus.test, package = "xgboost")
#' #'
#' ## Keep the number of threads to 1 for examples #' ## Keep the number of threads to 1 for examples
#' nthread <- 1 #' nthread <- 1
#' data.table::setDTthreads(nthread) #' data.table::setDTthreads(nthread)
#' nrounds <- 20 #' nrounds <- 20
#' #'
#' bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = nrounds, #' bst <- xgboost(
#' eta = 0.1, max_depth = 3, subsample = .5, #' agaricus.train$data,
#' method = "hist", objective = "binary:logistic", nthread = nthread, verbose = 0) #' agaricus.train$label,
#' nrounds = nrounds,
#' eta = 0.1,
#' max_depth = 3,
#' subsample = 0.5,
#' objective = "binary:logistic",
#' nthread = nthread,
#' verbose = 0
#' )
#' #'
#' xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none") #' xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none")
#'
#' contr <- predict(bst, agaricus.test$data, predcontrib = TRUE) #' contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
#' xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3) #' xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3)
#' xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12) # Summary plot
#' #'
#' # multiclass example - plots for each class separately: #' # Summary plot
#' xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12)
#'
#' # Multiclass example - plots for each class separately:
#' nclass <- 3 #' nclass <- 3
#' x <- as.matrix(iris[, -5]) #' x <- as.matrix(iris[, -5])
#' set.seed(123) #' set.seed(123)
#' is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values #' is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values
#' mbst <- xgboost(data = x, label = as.numeric(iris$Species) - 1, nrounds = nrounds, #'
#' max_depth = 2, eta = 0.3, subsample = .5, nthread = nthread, #' mbst <- xgboost(
#' objective = "multi:softprob", num_class = nclass, verbose = 0) #' data = x,
#' trees0 <- seq(from=0, by=nclass, length.out=nrounds) #' label = as.numeric(iris$Species) - 1,
#' nrounds = nrounds,
#' max_depth = 2,
#' eta = 0.3,
#' subsample = 0.5,
#' nthread = nthread,
#' objective = "multi:softprob",
#' num_class = nclass,
#' verbose = 0
#' )
#' trees0 <- seq(from = 0, by = nclass, length.out = nrounds)
#' col <- rgb(0, 0, 1, 0.5) #' col <- rgb(0, 0, 1, 0.5)
#' xgb.plot.shap(x, model = mbst, trees = trees0, target_class = 0, top_n = 4, #' xgb.plot.shap(
#' n_col = 2, col = col, pch = 16, pch_NA = 17) #' x,
#' xgb.plot.shap(x, model = mbst, trees = trees0 + 1, target_class = 1, top_n = 4, #' model = mbst,
#' n_col = 2, col = col, pch = 16, pch_NA = 17) #' trees = trees0,
#' xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4, #' target_class = 0,
#' n_col = 2, col = col, pch = 16, pch_NA = 17) #' top_n = 4,
#' xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4) # Summary plot #' n_col = 2,
#' col = col,
#' pch = 16,
#' pch_NA = 17
#' )
#'
#' xgb.plot.shap(
#' x,
#' model = mbst,
#' trees = trees0 + 1,
#' target_class = 1,
#' top_n = 4,
#' n_col = 2,
#' col = col,
#' pch = 16,
#' pch_NA = 17
#' )
#'
#' xgb.plot.shap(
#' x,
#' model = mbst,
#' trees = trees0 + 2,
#' target_class = 2,
#' top_n = 4,
#' n_col = 2,
#' col = col,
#' pch = 16,
#' pch_NA = 17
#' )
#'
#' # Summary plot
#' xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4)
#' #'
#' @rdname xgb.plot.shap #' @rdname xgb.plot.shap
#' @export #' @export
@ -187,41 +242,48 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
invisible(list(data = data, shap_contrib = shap_contrib)) invisible(list(data = data, shap_contrib = shap_contrib))
} }
#' SHAP contribution dependency summary plot #' SHAP summary plot
#' #'
#' Compare SHAP contributions of different features. #' Visualizes SHAP contributions of different features.
#' #'
#' A point plot (each point representing one sample from \code{data}) is #' A point plot (each point representing one observation from `data`) is
#' produced for each feature, with the points plotted on the SHAP value axis. #' produced for each feature, with the points plotted on the SHAP value axis.
#' Each point (observation) is coloured based on its feature value. The plot #' Each point (observation) is coloured based on its feature value.
#' hence allows us to see which features have a negative / positive contribution #'
#' The plot allows to see which features have a negative / positive contribution
#' on the model prediction, and whether the contribution is different for larger #' on the model prediction, and whether the contribution is different for larger
#' or smaller values of the feature. We effectively try to replicate the #' or smaller values of the feature. Inspired by the summary plot of
#' \code{summary_plot} function from https://github.com/shap/shap. #' <https://github.com/shap/shap>.
#' #'
#' @inheritParams xgb.plot.shap #' @inheritParams xgb.plot.shap
#' #'
#' @return A \code{ggplot2} object. #' @return A `ggplot2` object.
#' @export #' @export
#' #'
#' @examples # See \code{\link{xgb.plot.shap}}. #' @examples
#' @seealso \code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}}, #' # See examples in xgb.plot.shap()
#' \url{https://github.com/shap/shap} #'
#' @seealso [xgb.plot.shap()], [xgb.ggplot.shap.summary()],
#' and the Python library <https://github.com/shap/shap>.
xgb.plot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL, xgb.plot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) { trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
# Only ggplot implementation is available. # Only ggplot implementation is available.
xgb.ggplot.shap.summary(data, shap_contrib, features, top_n, model, trees, target_class, approxcontrib, subsample) xgb.ggplot.shap.summary(data, shap_contrib, features, top_n, model, trees, target_class, approxcontrib, subsample)
} }
#' Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc. #' Prepare data for SHAP plots
#' Internal utility function. #'
#' Internal function used in [xgb.plot.shap()], [xgb.plot.shap.summary()], etc.
#' #'
#' @inheritParams xgb.plot.shap #' @inheritParams xgb.plot.shap
#' @param max_observations Maximum number of observations to consider.
#' @keywords internal #' @keywords internal
#' @noRd
#' #'
#' @return A list containing: 'data', a matrix containing sample observations #' @return
#' and their feature values; 'shap_contrib', a matrix containing the SHAP contribution #' A list containing:
#' values for these observations. #' - `data`: The matrix of feature values.
#' - `shap_contrib`: The matrix with corresponding SHAP values.
xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1, model = NULL, xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1, model = NULL,
trees = NULL, target_class = NULL, approxcontrib = FALSE, trees = NULL, target_class = NULL, approxcontrib = FALSE,
subsample = NULL, max_observations = 100000) { subsample = NULL, max_observations = 100000) {
@ -241,7 +303,11 @@ xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
if (is.character(features) && is.null(colnames(data))) if (is.character(features) && is.null(colnames(data)))
stop("either provide `data` with column names or provide `features` as column indices") stop("either provide `data` with column names or provide `features` as column indices")
if (is.null(model$feature_names) && model$nfeatures != ncol(data)) model_feature_names <- NULL
if (is.null(features) && !is.null(model)) {
model_feature_names <- xgb.feature_names(model)
}
if (is.null(model_feature_names) && xgb.num_feature(model) != ncol(data))
stop("if model has no feature_names, columns in `data` must match features in model") stop("if model has no feature_names, columns in `data` must match features in model")
if (!is.null(subsample)) { if (!is.null(subsample)) {
@ -270,7 +336,7 @@ xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
} }
if (is.null(features)) { if (is.null(features)) {
if (!is.null(model$feature_names)) { if (!is.null(model_feature_names)) {
imp <- xgb.importance(model = model, trees = trees) imp <- xgb.importance(model = model, trees = trees)
} else { } else {
imp <- xgb.importance(model = model, trees = trees, feature_names = colnames(data)) imp <- xgb.importance(model = model, trees = trees, feature_names = colnames(data))

View File

@ -1,74 +1,109 @@
#' Plot a boosted tree model #' Plot boosted trees
#' #'
#' Read a tree model text dump and plot the model. #' Read a tree model text dump and plot the model.
#' #'
#' @param feature_names names of each feature as a \code{character} vector. #' @param model Object of class `xgb.Booster`. If it contains feature names (they can be set through
#' @param model produced by the \code{xgb.train} function. #' \link{setinfo}), they will be used in the output from this function.
#' @param trees an integer vector of tree indices that should be visualized. #' @param trees An integer vector of tree indices that should be used.
#' If set to \code{NULL}, all trees of the model are included. #' The default (`NULL`) uses all trees.
#' IMPORTANT: the tree index in xgboost model is zero-based #' Useful, e.g., in multiclass classification to get only
#' (e.g., use \code{trees = 0:2} for the first 3 trees in a model). #' the trees of one class. *Important*: the tree index in XGBoost models
#' @param plot_width the width of the diagram in pixels. #' is zero-based (e.g., use `trees = 0:2` for the first three trees).
#' @param plot_height the height of the diagram in pixels. #' @param plot_width,plot_height Width and height of the graph in pixels.
#' @param render a logical flag for whether the graph should be rendered (see Value). #' The values are passed to [DiagrammeR::render_graph()].
#' @param render Should the graph be rendered or not? The default is `TRUE`.
#' @param show_node_id a logical flag for whether to show node id's in the graph. #' @param show_node_id a logical flag for whether to show node id's in the graph.
#' @param style Style to use for the plot. Options are:\itemize{
#' \item `"xgboost"`: will use the plot style defined in the core XGBoost library,
#' which is shared between different interfaces through the 'dot' format. This
#' style was not available before version 2.1.0 in R. It always plots the trees
#' vertically (from top to bottom).
#' \item `"R"`: will use the style defined from XGBoost's R interface, which predates
#' the introducition of the standardized style from the core library. It might plot
#' the trees horizontally (from left to right).
#' }
#'
#' Note that `style="xgboost"` is only supported when all of the following conditions are met:\itemize{
#' \item Only a single tree is being plotted.
#' \item Node IDs are not added to the graph.
#' \item The graph is being returned as `htmlwidget` (`render=TRUE`).
#' }
#' @param ... currently not used. #' @param ... currently not used.
#' #'
#' @details #' @details
#' #'
#' The content of each node is organised that way: #' When using `style="xgboost"`, the content of each node is visualized as follows:
#' - For non-terminal nodes, it will display the split condition (number or name if
#' available, and the condition that would decide to which node to go next).
#' - Those nodes will be connected to their children by arrows that indicate whether the
#' branch corresponds to the condition being met or not being met.
#' - Terminal (leaf) nodes contain the margin to add when ending there.
#' #'
#' \itemize{ #' When using `style="R"`, the content of each node is visualized like this:
#' \item Feature name. #' - *Feature name*.
#' \item \code{Cover}: The sum of second order gradient of training data classified to the leaf. #' - *Cover:* The sum of second order gradients of training data.
#' If it is square loss, this simply corresponds to the number of instances seen by a split #' For the squared loss, this simply corresponds to the number of instances in the node.
#' or collected by a leaf during training. #' The deeper in the tree, the lower the value.
#' The deeper in the tree a node is, the lower this metric will be. #' - *Gain* (for split nodes): Information gain metric of a split
#' \item \code{Gain} (for split nodes): the information gain metric of a split
#' (corresponds to the importance of the node in the model). #' (corresponds to the importance of the node in the model).
#' \item \code{Value} (for leafs): the margin value that the leaf may contribute to prediction. #' - *Value* (for leaves): Margin value that the leaf may contribute to the prediction.
#' } #'
#' The tree root nodes also indicate the Tree index (0-based). #' The tree root nodes also indicate the tree index (0-based).
#' #'
#' The "Yes" branches are marked by the "< split_value" label. #' The "Yes" branches are marked by the "< split_value" label.
#' The branches that also used for missing values are marked as bold #' The branches also used for missing values are marked as bold
#' (as in "carrying extra capacity"). #' (as in "carrying extra capacity").
#' #'
#' This function uses \href{https://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR. #' This function uses [GraphViz](https://www.graphviz.org/) as DiagrammeR backend.
#' #'
#' @return #' @return
#' #' The value depends on the `render` parameter:
#' When \code{render = TRUE}: #' - If `render = TRUE` (default): Rendered graph object which is an htmlwidget of
#' returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}. #' class `grViz`. Similar to "ggplot" objects, it needs to be printed when not
#' Similar to ggplot objects, it needs to be printed to see it when not running from command line. #' running from the command line.
#' #' - If `render = FALSE`: Graph object which is of DiagrammeR's class `dgr_graph`.
#' When \code{render = FALSE}: #' This could be useful if one wants to modify some of the graph attributes
#' silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}. #' before rendering the graph with [DiagrammeR::render_graph()].
#' This could be useful if one wants to modify some of the graph attributes
#' before rendering the graph with \code{\link[DiagrammeR]{render_graph}}.
#' #'
#' @examples #' @examples
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package = "xgboost")
#'
#' bst <- xgboost(
#' data = agaricus.train$data,
#' label = agaricus.train$label,
#' max_depth = 3,
#' eta = 1,
#' nthread = 2,
#' nrounds = 2,
#' objective = "binary:logistic"
#' )
#'
#' # plot the first tree, using the style from xgboost's core library
#' # (this plot should look identical to the ones generated from other
#' # interfaces like the python package for xgboost)
#' xgb.plot.tree(model = bst, trees = 1, style = "xgboost")
#' #'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#' # plot all the trees #' # plot all the trees
#' xgb.plot.tree(model = bst) #' xgb.plot.tree(model = bst, trees = NULL)
#'
#' # plot only the first tree and display the node ID: #' # plot only the first tree and display the node ID:
#' xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE) #' xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE)
#' #'
#' \dontrun{ #' \dontrun{
#' # Below is an example of how to save this plot to a file. #' # Below is an example of how to save this plot to a file.
#' # Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed. #' # Note that for export_graph() to work, the {DiagrammeRsvg}
#' # and {rsvg} packages must also be installed.
#'
#' library(DiagrammeR) #' library(DiagrammeR)
#' gr <- xgb.plot.tree(model=bst, trees=0:1, render=FALSE) #'
#' export_graph(gr, 'tree.pdf', width=1500, height=1900) #' gr <- xgb.plot.tree(model = bst, trees = 0:1, render = FALSE)
#' export_graph(gr, 'tree.png', width=1500, height=1900) #' export_graph(gr, "tree.pdf", width = 1500, height = 1900)
#' export_graph(gr, "tree.png", width = 1500, height = 1900)
#' } #' }
#' #'
#' @export #' @export
xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot_width = NULL, plot_height = NULL, xgb.plot.tree <- function(model = NULL, trees = NULL, plot_width = NULL, plot_height = NULL,
render = TRUE, show_node_id = FALSE, ...) { render = TRUE, show_node_id = FALSE, style = c("R", "xgboost"), ...) {
check.deprecation(...) check.deprecation(...)
if (!inherits(model, "xgb.Booster")) { if (!inherits(model, "xgb.Booster")) {
stop("model: Has to be an object of class xgb.Booster") stop("model: Has to be an object of class xgb.Booster")
@ -78,9 +113,20 @@ xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot
stop("DiagrammeR package is required for xgb.plot.tree", call. = FALSE) stop("DiagrammeR package is required for xgb.plot.tree", call. = FALSE)
} }
dt <- xgb.model.dt.tree(feature_names = feature_names, model = model, trees = trees) style <- as.character(head(style, 1L))
stopifnot(style %in% c("R", "xgboost"))
if (style == "xgboost") {
if (NROW(trees) != 1L || !render || show_node_id) {
stop("style='xgboost' is only supported for single, rendered tree, without node IDs.")
}
dt[, label := paste0(Feature, "\nCover: ", Cover, ifelse(Feature == "Leaf", "\nValue: ", "\nGain: "), Quality)] txt <- xgb.dump(model, dump_format = "dot")
return(DiagrammeR::grViz(txt[[trees + 1]], width = plot_width, height = plot_height))
}
dt <- xgb.model.dt.tree(model = model, trees = trees)
dt[, label := paste0(Feature, "\nCover: ", Cover, ifelse(Feature == "Leaf", "\nValue: ", "\nGain: "), Gain)]
if (show_node_id) if (show_node_id)
dt[, label := paste0(ID, ": ", label)] dt[, label := paste0(ID, ": ", label)]
dt[Node == 0, label := paste0("Tree ", Tree, "\n", label)] dt[Node == 0, label := paste0("Tree ", Tree, "\n", label)]
@ -147,4 +193,4 @@ xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot
# Avoid error messages during CRAN check. # Avoid error messages during CRAN check.
# The reason is that these variables are never declared # The reason is that these variables are never declared
# They are mainly column names inferred by Data.table... # They are mainly column names inferred by Data.table...
globalVariables(c("Feature", "ID", "Cover", "Quality", "Split", "Yes", "No", "Missing", ".", "shape", "filledcolor", "label")) globalVariables(c("Feature", "ID", "Cover", "Gain", "Split", "Yes", "No", "Missing", ".", "shape", "filledcolor", "label"))

View File

@ -1,12 +1,24 @@
#' Save xgboost model to binary file #' Save xgboost model to binary file
#' #'
#' Save xgboost model to a file in binary format. #' Save xgboost model to a file in binary or JSON format.
#' #'
#' @param model model object of \code{xgb.Booster} class. #' @param model Model object of \code{xgb.Booster} class.
#' @param fname name of the file to write. #' @param fname Name of the file to write.
#'
#' Note that the extension of this file name determined the serialization format to use:\itemize{
#' \item Extension ".ubj" will use the universal binary JSON format (recommended).
#' This format uses binary types for e.g. floating point numbers, thereby preventing any loss
#' of precision when converting to a human-readable JSON text or similar.
#' \item Extension ".json" will use plain JSON, which is a human-readable format.
#' \item Extension ".deprecated" will use a \bold{deprecated} binary format. This format will
#' not be able to save attributes introduced after v1 of XGBoost, such as the "best_iteration"
#' attribute that boosters might keep, nor feature names or user-specifiec attributes.
#' \item If the format is not specified by passing one of the file extensions above, will
#' default to UBJ.
#' }
#' #'
#' @details #' @details
#' This methods allows to save a model in an xgboost-internal binary format which is universal #' This methods allows to save a model in an xgboost-internal binary or text format which is universal
#' among the various xgboost interfaces. In R, the saved model file could be read-in later #' among the various xgboost interfaces. In R, the saved model file could be read-in later
#' using either the \code{\link{xgb.load}} function or the \code{xgb_model} parameter #' using either the \code{\link{xgb.load}} function or the \code{xgb_model} parameter
#' of \code{\link{xgb.train}}. #' of \code{\link{xgb.train}}.
@ -14,13 +26,13 @@
#' Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{readRDS}} #' Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{readRDS}}
#' or \code{\link[base]{save}}). However, it would then only be compatible with R, and #' or \code{\link[base]{save}}). However, it would then only be compatible with R, and
#' corresponding R-methods would need to be used to load it. Moreover, persisting the model with #' corresponding R-methods would need to be used to load it. Moreover, persisting the model with
#' \code{\link[base]{readRDS}} or \code{\link[base]{save}}) will cause compatibility problems in #' \code{\link[base]{readRDS}} or \code{\link[base]{save}}) might cause compatibility problems in
#' future versions of XGBoost. Consult \code{\link{a-compatibility-note-for-saveRDS-save}} to learn #' future versions of XGBoost. Consult \code{\link{a-compatibility-note-for-saveRDS-save}} to learn
#' how to persist models in a future-proof way, i.e. to make the model accessible in future #' how to persist models in a future-proof way, i.e. to make the model accessible in future
#' releases of XGBoost. #' releases of XGBoost.
#' #'
#' @seealso #' @seealso
#' \code{\link{xgb.load}}, \code{\link{xgb.Booster.complete}}. #' \code{\link{xgb.load}}
#' #'
#' @examples #' @examples
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package='xgboost')
@ -32,15 +44,17 @@
#' #'
#' train <- agaricus.train #' train <- agaricus.train
#' test <- agaricus.test #' test <- agaricus.test
#' bst <- xgboost( #' bst <- xgb.train(
#' data = train$data, label = train$label, max_depth = 2, eta = 1, #' data = xgb.DMatrix(train$data, label = train$label),
#' max_depth = 2,
#' eta = 1,
#' nthread = nthread, #' nthread = nthread,
#' nrounds = 2, #' nrounds = 2,
#' objective = "binary:logistic" #' objective = "binary:logistic"
#' ) #' )
#' xgb.save(bst, 'xgb.model') #' fname <- file.path(tempdir(), "xgb.ubj")
#' bst <- xgb.load('xgb.model') #' xgb.save(bst, fname)
#' if (file.exists('xgb.model')) file.remove('xgb.model') #' bst <- xgb.load(fname)
#' @export #' @export
xgb.save <- function(model, fname) { xgb.save <- function(model, fname) {
if (typeof(fname) != "character") if (typeof(fname) != "character")
@ -49,8 +63,7 @@ xgb.save <- function(model, fname) {
stop("model must be xgb.Booster.", stop("model must be xgb.Booster.",
if (inherits(model, "xgb.DMatrix")) " Use xgb.DMatrix.save to save an xgb.DMatrix object." else "") if (inherits(model, "xgb.DMatrix")) " Use xgb.DMatrix.save to save an xgb.DMatrix object." else "")
} }
model <- xgb.Booster.complete(model, saveraw = FALSE)
fname <- path.expand(fname) fname <- path.expand(fname)
.Call(XGBoosterSaveModel_R, model$handle, enc2utf8(fname[1])) .Call(XGBoosterSaveModel_R, xgb.get.handle(model), enc2utf8(fname[1]))
return(TRUE) return(TRUE)
} }

View File

@ -11,8 +11,6 @@
#' \item \code{deprecated}: Encode the booster into old customized binary format. #' \item \code{deprecated}: Encode the booster into old customized binary format.
#' } #' }
#' #'
#' Right now the default is \code{deprecated} but will be changed to \code{ubj} in upcoming release.
#'
#' @examples #' @examples
#' data(agaricus.train, package='xgboost') #' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost') #' data(agaricus.test, package='xgboost')
@ -23,14 +21,14 @@
#' #'
#' train <- agaricus.train #' train <- agaricus.train
#' test <- agaricus.test #' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, #' bst <- xgb.train(data = xgb.DMatrix(train$data, label = train$label), max_depth = 2,
#' eta = 1, nthread = nthread, nrounds = 2,objective = "binary:logistic") #' eta = 1, nthread = nthread, nrounds = 2,objective = "binary:logistic")
#' #'
#' raw <- xgb.save.raw(bst) #' raw <- xgb.save.raw(bst)
#' bst <- xgb.load.raw(raw) #' bst <- xgb.load.raw(raw)
#' #'
#' @export #' @export
xgb.save.raw <- function(model, raw_format = "deprecated") { xgb.save.raw <- function(model, raw_format = "ubj") {
handle <- xgb.get.handle(model) handle <- xgb.get.handle(model)
args <- list(format = raw_format) args <- list(format = raw_format)
.Call(XGBoosterSaveModelToRaw_R, handle, jsonlite::toJSON(args, auto_unbox = TRUE)) .Call(XGBoosterSaveModelToRaw_R, handle, jsonlite::toJSON(args, auto_unbox = TRUE))

View File

@ -1,21 +0,0 @@
#' Serialize the booster instance into R's raw vector. The serialization method differs
#' from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
#' parameters. This serialization format is not stable across different xgboost versions.
#'
#' @param booster the booster instance
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#' raw <- xgb.serialize(bst)
#' bst <- xgb.unserialize(raw)
#'
#' @export
xgb.serialize <- function(booster) {
handle <- xgb.get.handle(booster)
.Call(XGBoosterSerializeToBuffer_R, handle)
}

View File

@ -152,6 +152,10 @@
#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the #' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
#' parameters' values. User can provide either existing or their own callback methods in order #' parameters' values. User can provide either existing or their own callback methods in order
#' to customize the training process. #' to customize the training process.
#'
#' Note that some callbacks might try to set an evaluation log - be aware that these evaluation logs
#' are kept as R attributes, and thus do not get saved when using non-R serializaters like
#' \link{xgb.save} (but are kept when using R serializers like \link{saveRDS}).
#' @param ... other parameters to pass to \code{params}. #' @param ... other parameters to pass to \code{params}.
#' @param label vector of response values. Should not be provided when data is #' @param label vector of response values. Should not be provided when data is
#' a local data file name or an \code{xgb.DMatrix}. #' a local data file name or an \code{xgb.DMatrix}.
@ -160,6 +164,9 @@
#' This parameter is only used when input is a dense matrix. #' This parameter is only used when input is a dense matrix.
#' @param weight a vector indicating the weight for each row of the input. #' @param weight a vector indicating the weight for each row of the input.
#' #'
#' @return
#' An object of class \code{xgb.Booster}.
#'
#' @details #' @details
#' These are the training functions for \code{xgboost}. #' These are the training functions for \code{xgboost}.
#' #'
@ -201,28 +208,20 @@
#' \item \code{cb.save.model}: when \code{save_period > 0} is set. #' \item \code{cb.save.model}: when \code{save_period > 0} is set.
#' } #' }
#' #'
#' @return #' Note that objects of type `xgb.Booster` as returned by this function behave a bit differently
#' An object of class \code{xgb.Booster} with the following elements: #' from typical R objects (it's an 'altrep' list class), and it makes a separation between
#' \itemize{ #' internal booster attributes (restricted to jsonifyable data), accessed through \link{xgb.attr}
#' \item \code{handle} a handle (pointer) to the xgboost model in memory. #' and shared between interfaces through serialization functions like \link{xgb.save}; and
#' \item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type. #' R-specific attributes, accessed through \link{attributes} and \link{attr}, which are otherwise
#' \item \code{niter} number of boosting iterations. #' only used in the R interface, only kept when using R's serializers like \link{saveRDS}, and
#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the #' not anyhow used by functions like \link{predict.xgb.Booster}.
#' first column corresponding to iteration number and the rest corresponding to evaluation #'
#' metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback. #' Be aware that one such R attribute that is automatically added is `params` - this attribute
#' \item \code{call} a function call. #' is assigned from the `params` argument to this function, and is only meant to serve as a
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not #' reference for what went into the booster, but is not used in other methods that take a booster
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback. #' object - so for example, changing the booster's configuration requires calling `xgb.config<-`
#' \item \code{callbacks} callback functions that were either automatically assigned or #' or 'xgb.parameters<-', while simply modifying `attributes(model)$params$<...>` will have no
#' explicitly passed. #' effect elsewhere.
#' \item \code{best_iteration} iteration number with the best evaluation metric value
#' (only available with early stopping).
#' \item \code{best_score} the best evaluation metric value during early stopping.
#' (only available with early stopping).
#' \item \code{feature_names} names of the training dataset features
#' (only when column names were defined in training data).
#' \item \code{nfeatures} number of features in training data.
#' }
#' #'
#' @seealso #' @seealso
#' \code{\link{callbacks}}, #' \code{\link{callbacks}},
@ -251,9 +250,9 @@
#' watchlist <- list(train = dtrain, eval = dtest) #' watchlist <- list(train = dtrain, eval = dtest)
#' #'
#' ## A simple xgb.train example: #' ## A simple xgb.train example:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = nthread, #' param <- list(max_depth = 2, eta = 1, nthread = nthread,
#' objective = "binary:logistic", eval_metric = "auc") #' objective = "binary:logistic", eval_metric = "auc")
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist) #' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0)
#' #'
#' ## An xgb.train example where custom objective and evaluation metric are #' ## An xgb.train example where custom objective and evaluation metric are
#' ## used: #' ## used:
@ -272,13 +271,13 @@
#' #'
#' # These functions could be used by passing them either: #' # These functions could be used by passing them either:
#' # as 'objective' and 'eval_metric' parameters in the params list: #' # as 'objective' and 'eval_metric' parameters in the params list:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = nthread, #' param <- list(max_depth = 2, eta = 1, nthread = nthread,
#' objective = logregobj, eval_metric = evalerror) #' objective = logregobj, eval_metric = evalerror)
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist) #' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0)
#' #'
#' # or through the ... arguments: #' # or through the ... arguments:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = nthread) #' param <- list(max_depth = 2, eta = 1, nthread = nthread)
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, #' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
#' objective = logregobj, eval_metric = evalerror) #' objective = logregobj, eval_metric = evalerror)
#' #'
#' # or as dedicated 'obj' and 'feval' parameters of xgb.train: #' # or as dedicated 'obj' and 'feval' parameters of xgb.train:
@ -287,10 +286,10 @@
#' #'
#' #'
#' ## An xgb.train example of using variable learning rates at each iteration: #' ## An xgb.train example of using variable learning rates at each iteration:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = nthread, #' param <- list(max_depth = 2, eta = 1, nthread = nthread,
#' objective = "binary:logistic", eval_metric = "auc") #' objective = "binary:logistic", eval_metric = "auc")
#' my_etas <- list(eta = c(0.5, 0.1)) #' my_etas <- list(eta = c(0.5, 0.1))
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, #' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
#' callbacks = list(cb.reset.parameters(my_etas))) #' callbacks = list(cb.reset.parameters(my_etas)))
#' #'
#' ## Early stopping: #' ## Early stopping:
@ -371,27 +370,30 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
# The tree updating process would need slightly different handling # The tree updating process would need slightly different handling
is_update <- NVL(params[['process_type']], '.') == 'update' is_update <- NVL(params[['process_type']], '.') == 'update'
past_evaluation_log <- NULL
if (inherits(xgb_model, "xgb.Booster")) {
past_evaluation_log <- attributes(xgb_model)$evaluation_log
}
# Construct a booster (either a new one or load from xgb_model) # Construct a booster (either a new one or load from xgb_model)
handle <- xgb.Booster.handle( bst <- xgb.Booster(
params = params, params = params,
cachelist = append(watchlist, dtrain), cachelist = append(watchlist, dtrain),
modelfile = xgb_model, modelfile = xgb_model
handle = NULL )
niter_init <- bst$niter
bst <- bst$bst
.Call(
XGBoosterCopyInfoFromDMatrix_R,
xgb.get.handle(bst),
dtrain
) )
bst <- xgb.handleToBooster(handle = handle, raw = NULL)
# extract parameters that can affect the relationship b/w #trees and #iterations # extract parameters that can affect the relationship b/w #trees and #iterations
num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1) # Note: it might look like these aren't used, but they need to be defined in this
num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1) # environment for the callbacks for work correctly.
num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1) # nolint
# When the 'xgb_model' was set, find out how many boosting iterations it has
niter_init <- 0
if (!is.null(xgb_model)) {
niter_init <- as.numeric(xgb.attr(bst, 'niter')) + 1
if (length(niter_init) == 0) {
niter_init <- xgb.ntree(bst) %/% (num_parallel_tree * num_class)
}
}
if (is_update && nrounds > niter_init) if (is_update && nrounds > niter_init)
stop("nrounds cannot be larger than ", niter_init, " (nrounds of xgb_model)") stop("nrounds cannot be larger than ", niter_init, " (nrounds of xgb_model)")
@ -405,7 +407,7 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
for (f in cb$pre_iter) f() for (f in cb$pre_iter) f()
xgb.iter.update( xgb.iter.update(
booster_handle = bst$handle, bst = bst,
dtrain = dtrain, dtrain = dtrain,
iter = iteration - 1, iter = iteration - 1,
obj = obj obj = obj
@ -413,46 +415,43 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
if (length(watchlist) > 0) { if (length(watchlist) > 0) {
bst_evaluation <- xgb.iter.eval( # nolint: object_usage_linter bst_evaluation <- xgb.iter.eval( # nolint: object_usage_linter
booster_handle = bst$handle, bst = bst,
watchlist = watchlist, watchlist = watchlist,
iter = iteration - 1, iter = iteration - 1,
feval = feval feval = feval
) )
} }
xgb.attr(bst$handle, 'niter') <- iteration - 1
for (f in cb$post_iter) f() for (f in cb$post_iter) f()
if (stop_condition) break if (stop_condition) break
} }
for (f in cb$finalize) f(finalize = TRUE) for (f in cb$finalize) f(finalize = TRUE)
bst <- xgb.Booster.complete(bst, saveraw = TRUE)
# store the total number of boosting iterations
bst$niter <- end_iteration
# store the evaluation results # store the evaluation results
if (length(evaluation_log) > 0 && keep_evaluation_log <- FALSE
nrow(evaluation_log) > 0) { if (length(evaluation_log) > 0 && nrow(evaluation_log) > 0) {
keep_evaluation_log <- TRUE
# include the previous compatible history when available # include the previous compatible history when available
if (inherits(xgb_model, 'xgb.Booster') && if (inherits(xgb_model, 'xgb.Booster') &&
!is_update && !is_update &&
!is.null(xgb_model$evaluation_log) && !is.null(past_evaluation_log) &&
isTRUE(all.equal(colnames(evaluation_log), isTRUE(all.equal(colnames(evaluation_log),
colnames(xgb_model$evaluation_log)))) { colnames(past_evaluation_log)))) {
evaluation_log <- rbindlist(list(xgb_model$evaluation_log, evaluation_log)) evaluation_log <- rbindlist(list(past_evaluation_log, evaluation_log))
} }
bst$evaluation_log <- evaluation_log
} }
bst$call <- match.call() extra_attrs <- list(
bst$params <- params call = match.call(),
bst$callbacks <- callbacks params = params,
if (!is.null(colnames(dtrain))) callbacks = callbacks
bst$feature_names <- colnames(dtrain) )
bst$nfeatures <- ncol(dtrain) if (keep_evaluation_log) {
extra_attrs$evaluation_log <- evaluation_log
}
curr_attrs <- attributes(bst)
attributes(bst) <- c(curr_attrs, extra_attrs)
return(bst) return(bst)
} }

View File

@ -1,41 +0,0 @@
#' Load the instance back from \code{\link{xgb.serialize}}
#'
#' @param buffer the buffer containing booster instance saved by \code{\link{xgb.serialize}}
#' @param handle An \code{xgb.Booster.handle} object which will be overwritten with
#' the new deserialized object. Must be a null handle (e.g. when loading the model through
#' `readRDS`). If not provided, a new handle will be created.
#' @return An \code{xgb.Booster.handle} object.
#'
#' @export
xgb.unserialize <- function(buffer, handle = NULL) {
cachelist <- list()
if (is.null(handle)) {
handle <- .Call(XGBoosterCreate_R, cachelist)
} else {
if (!is.null.handle(handle))
stop("'handle' is not null/empty. Cannot overwrite existing handle.")
.Call(XGBoosterCreateInEmptyObj_R, cachelist, handle)
}
tryCatch(
.Call(XGBoosterUnserializeFromBuffer_R, handle, buffer),
error = function(e) {
error_msg <- conditionMessage(e)
m <- regexec("(src[\\\\/]learner.cc:[0-9]+): Check failed: (header == serialisation_header_)",
error_msg, perl = TRUE)
groups <- regmatches(error_msg, m)[[1]]
if (length(groups) == 3) {
warning(paste("The model had been generated by XGBoost version 1.0.0 or earlier and was ",
"loaded from a RDS file. We strongly ADVISE AGAINST using saveRDS() ",
"function, to ensure that your model can be read in current and upcoming ",
"XGBoost releases. Please use xgb.save() instead to preserve models for the ",
"long term. For more details and explanation, see ",
"https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html",
sep = ""))
.Call(XGBoosterLoadModelFromRaw_R, handle, buffer)
} else {
stop(e)
}
})
class(handle) <- "xgb.Booster.handle"
return(handle)
}

View File

@ -40,10 +40,10 @@ xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
#' } #' }
#' #'
#' @references #' @references
#' https://archive.ics.uci.edu/ml/datasets/Mushroom #' <https://archive.ics.uci.edu/ml/datasets/Mushroom>
#' #'
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository #' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, #' <http://archive.ics.uci.edu/ml>. Irvine, CA: University of California,
#' School of Information and Computer Science. #' School of Information and Computer Science.
#' #'
#' @docType data #' @docType data
@ -67,10 +67,10 @@ NULL
#' } #' }
#' #'
#' @references #' @references
#' https://archive.ics.uci.edu/ml/datasets/Mushroom #' <https://archive.ics.uci.edu/ml/datasets/Mushroom>
#' #'
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository #' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, #' <http://archive.ics.uci.edu/ml>. Irvine, CA: University of California,
#' School of Information and Computer Science. #' School of Information and Computer Science.
#' #'
#' @docType data #' @docType data
@ -82,7 +82,7 @@ NULL
NULL NULL
# Various imports # Various imports
#' @importClassesFrom Matrix dgCMatrix dgeMatrix #' @importClassesFrom Matrix dgCMatrix dgeMatrix dgRMatrix
#' @importFrom Matrix colSums #' @importFrom Matrix colSums
#' @importFrom Matrix sparse.model.matrix #' @importFrom Matrix sparse.model.matrix
#' @importFrom Matrix sparseVector #' @importFrom Matrix sparseVector
@ -98,9 +98,12 @@ NULL
#' @importFrom data.table setnames #' @importFrom data.table setnames
#' @importFrom jsonlite fromJSON #' @importFrom jsonlite fromJSON
#' @importFrom jsonlite toJSON #' @importFrom jsonlite toJSON
#' @importFrom methods new
#' @importFrom utils object.size str tail #' @importFrom utils object.size str tail
#' @importFrom stats coef
#' @importFrom stats predict #' @importFrom stats predict
#' @importFrom stats median #' @importFrom stats median
#' @importFrom stats variable.names
#' @importFrom utils head #' @importFrom utils head
#' @importFrom graphics barplot #' @importFrom graphics barplot
#' @importFrom graphics lines #' @importFrom graphics lines

View File

@ -1,5 +1,4 @@
basic_walkthrough Basic feature walkthrough basic_walkthrough Basic feature walkthrough
caret_wrapper Use xgboost to train in caret library
custom_objective Customize loss function, and evaluation metric custom_objective Customize loss function, and evaluation metric
boost_from_prediction Boosting from existing prediction boost_from_prediction Boosting from existing prediction
predict_first_ntree Predicting using first n trees predict_first_ntree Predicting using first n trees

View File

@ -1,7 +1,6 @@
XGBoost R Feature Walkthrough XGBoost R Feature Walkthrough
==== ====
* [Basic walkthrough of wrappers](basic_walkthrough.R) * [Basic walkthrough of wrappers](basic_walkthrough.R)
* [Train a xgboost model from caret library](caret_wrapper.R)
* [Customize loss function, and evaluation metric](custom_objective.R) * [Customize loss function, and evaluation metric](custom_objective.R)
* [Boosting from existing prediction](boost_from_prediction.R) * [Boosting from existing prediction](boost_from_prediction.R)
* [Predicting using first n trees](predict_first_ntree.R) * [Predicting using first n trees](predict_first_ntree.R)

View File

@ -1,44 +0,0 @@
# install development version of caret library that contains xgboost models
require(caret)
require(xgboost)
require(data.table)
require(vcd)
require(e1071)
# Load Arthritis dataset in memory.
data(Arthritis)
# Create a copy of the dataset with data.table package
# (data.table is 100% compliant with R dataframe but its syntax is a lot more consistent
# and its performance are really good).
df <- data.table(Arthritis, keep.rownames = FALSE)
# Let's add some new categorical features to see if it helps.
# Of course these feature are highly correlated to the Age feature.
# Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features,
# even in case of highly correlated features.
# For the first feature we create groups of age by rounding the real age.
# Note that we transform it to factor (categorical data) so the algorithm treat them as independant values.
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old.
# I choose this value based on nothing.
# We will see later if simplifying the information based on arbitrary values is a good strategy
# (I am sure you already have an idea of how well it will work!).
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
df[, ID := NULL]
#-------------Basic Training using XGBoost in caret Library-----------------
# Set up control parameters for caret::train
# Here we use 10-fold cross-validation, repeating twice, and using random search for tuning hyper-parameters.
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 2, search = "random")
# train a xgbTree model using caret::train
model <- train(factor(Improved) ~ ., data = df, method = "xgbTree", trControl = fitControl)
# Instead of tree for our boosters, you can also fit a linear regression or logistic regression model
# using xgbLinear
# model <- train(factor(Improved)~., data = df, method = "xgbLinear", trControl = fitControl)
# See model results
print(model)

View File

@ -81,8 +81,8 @@ output_vector <- df[, Y := 0][Improved == "Marked", Y := 1][, Y]
# Following is the same process as other demo # Following is the same process as other demo
cat("Learning...\n") cat("Learning...\n")
bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 9, bst <- xgb.train(data = xgb.DMatrix(sparse_matrix, label = output_vector), max_depth = 9,
eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic") eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")
importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst) importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
print(importance) print(importance)

View File

@ -74,26 +74,26 @@ cols2ids <- function(object, col_names) {
interaction_list_fid <- cols2ids(interaction_list, colnames(train)) interaction_list_fid <- cols2ids(interaction_list, colnames(train))
# Fit model with interaction constraints # Fit model with interaction constraints
bst <- xgboost(data = train, label = y, max_depth = 4, bst <- xgb.train(data = xgb.DMatrix(train, label = y), max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000, eta = 0.1, nthread = 2, nrounds = 1000,
interaction_constraints = interaction_list_fid) interaction_constraints = interaction_list_fid)
bst_tree <- xgb.model.dt.tree(colnames(train), bst) bst_tree <- xgb.model.dt.tree(colnames(train), bst)
bst_interactions <- treeInteractions(bst_tree, 4) bst_interactions <- treeInteractions(bst_tree, 4)
# interactions constrained to combinations of V1*V2 and V3*V4*V5 # interactions constrained to combinations of V1*V2 and V3*V4*V5
# Fit model without interaction constraints # Fit model without interaction constraints
bst2 <- xgboost(data = train, label = y, max_depth = 4, bst2 <- xgb.train(data = xgb.DMatrix(train, label = y), max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000) eta = 0.1, nthread = 2, nrounds = 1000)
bst2_tree <- xgb.model.dt.tree(colnames(train), bst2) bst2_tree <- xgb.model.dt.tree(colnames(train), bst2)
bst2_interactions <- treeInteractions(bst2_tree, 4) # much more interactions bst2_interactions <- treeInteractions(bst2_tree, 4) # much more interactions
# Fit model with both interaction and monotonicity constraints # Fit model with both interaction and monotonicity constraints
bst3 <- xgboost(data = train, label = y, max_depth = 4, bst3 <- xgb.train(data = xgb.DMatrix(train, label = y), max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000, eta = 0.1, nthread = 2, nrounds = 1000,
interaction_constraints = interaction_list_fid, interaction_constraints = interaction_list_fid,
monotone_constraints = c(-1, 0, 0, 0, 0, 0, 0, 0, 0, 0)) monotone_constraints = c(-1, 0, 0, 0, 0, 0, 0, 0, 0, 0))
bst3_tree <- xgb.model.dt.tree(colnames(train), bst3) bst3_tree <- xgb.model.dt.tree(colnames(train), bst3)
bst3_interactions <- treeInteractions(bst3_tree, 4) bst3_interactions <- treeInteractions(bst3_tree, 4)

View File

@ -1,6 +1,6 @@
data(mtcars) data(mtcars)
head(mtcars) head(mtcars)
bst <- xgboost(data = as.matrix(mtcars[, -11]), label = mtcars[, 11], bst <- xgb.train(data = xgb.DMatrix(as.matrix(mtcars[, -11]), label = mtcars[, 11]),
objective = 'count:poisson', nrounds = 5) objective = 'count:poisson', nrounds = 5)
pred <- predict(bst, as.matrix(mtcars[, -11])) pred <- predict(bst, as.matrix(mtcars[, -11]))
sqrt(mean((pred - mtcars[, 11]) ^ 2)) sqrt(mean((pred - mtcars[, 11]) ^ 2))

View File

@ -15,7 +15,7 @@ cat('start testing prediction from first n trees\n')
labels <- getinfo(dtest, 'label') labels <- getinfo(dtest, 'label')
### predict using first 1 tree ### predict using first 1 tree
ypred1 <- predict(bst, dtest, ntreelimit = 1) ypred1 <- predict(bst, dtest, iterationrange = c(1, 1))
# by default, we predict using all the trees # by default, we predict using all the trees
ypred2 <- predict(bst, dtest) ypred2 <- predict(bst, dtest)

View File

@ -27,7 +27,7 @@ head(pred_with_leaf)
create.new.tree.features <- function(model, original.features) { create.new.tree.features <- function(model, original.features) {
pred_with_leaf <- predict(model, original.features, predleaf = TRUE) pred_with_leaf <- predict(model, original.features, predleaf = TRUE)
cols <- list() cols <- list()
for (i in 1:model$niter) { for (i in 1:xgb.get.num.boosted.rounds(model)) {
# max is not the real max but it s not important for the purpose of adding features # max is not the real max but it s not important for the purpose of adding features
leaf.id <- sort(unique(pred_with_leaf[, i])) leaf.id <- sort(unique(pred_with_leaf[, i]))
cols[[i]] <- factor(x = pred_with_leaf[, i], level = leaf.id) cols[[i]] <- factor(x = pred_with_leaf[, i], level = leaf.id)

View File

@ -9,6 +9,5 @@ demo(create_sparse_matrix, package = 'xgboost')
demo(predict_leaf_indices, package = 'xgboost') demo(predict_leaf_indices, package = 'xgboost')
demo(early_stopping, package = 'xgboost') demo(early_stopping, package = 'xgboost')
demo(poisson_regression, package = 'xgboost') demo(poisson_regression, package = 'xgboost')
demo(caret_wrapper, package = 'xgboost')
demo(tweedie_regression, package = 'xgboost') demo(tweedie_regression, package = 'xgboost')
#demo(gpu_accelerated, package = 'xgboost') # can only run when built with GPU support #demo(gpu_accelerated, package = 'xgboost') # can only run when built with GPU support

View File

@ -55,7 +55,7 @@ message(sprintf("Creating '%s' from '%s'", OUT_DEF_FILE, IN_DLL_FILE))
} }
# use objdump to dump all the symbols # use objdump to dump all the symbols
OBJDUMP_FILE <- "objdump-out.txt" OBJDUMP_FILE <- file.path(tempdir(), "objdump-out.txt")
.pipe_shell_command_to_stdout( .pipe_shell_command_to_stdout(
command = "objdump" command = "objdump"
, args = c("-p", IN_DLL_FILE) , args = c("-p", IN_DLL_FILE)

View File

@ -2,16 +2,44 @@
% Please edit documentation in R/utils.R % Please edit documentation in R/utils.R
\name{a-compatibility-note-for-saveRDS-save} \name{a-compatibility-note-for-saveRDS-save}
\alias{a-compatibility-note-for-saveRDS-save} \alias{a-compatibility-note-for-saveRDS-save}
\title{Do not use \code{\link[base]{saveRDS}} or \code{\link[base]{save}} for long-term archival of \title{Model Serialization and Compatibility}
models. Instead, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}}.}
\description{ \description{
It is a common practice to use the built-in \code{\link[base]{saveRDS}} function (or When it comes to serializing XGBoost models, it's possible to use R serializers such as
\code{\link[base]{save}}) to persist R objects to the disk. While it is possible to persist \link{save} or \link{saveRDS} to serialize an XGBoost R model, but XGBoost also provides
\code{xgb.Booster} objects using \code{\link[base]{saveRDS}}, it is not advisable to do so if its own serializers with better compatibility guarantees, which allow loading
the model is to be accessed in the future. If you train a model with the current version of said models in other language bindings of XGBoost.
XGBoost and persist it with \code{\link[base]{saveRDS}}, the model is not guaranteed to be
accessible in later releases of XGBoost. To ensure that your model can be accessed in future Note that an \code{xgb.Booster} object, outside of its core components, might also keep:\itemize{
releases of XGBoost, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}} instead. \item Additional model configuration (accessible through \link{xgb.config}),
which includes model fitting parameters like \code{max_depth} and runtime parameters like \code{nthread}.
These are not necessarily useful for prediction/importance/plotting.
\item Additional R-specific attributes - e.g. results of callbacks, such as evaluation logs,
which are kept as a \code{data.table} object, accessible through \code{attributes(model)$evaluation_log}
if present.
}
The first one (configurations) does not have the same compatibility guarantees as
the model itself, including attributes that are set and accessed through \link{xgb.attributes} - that is, such configuration
might be lost after loading the booster in a different XGBoost version, regardless of the
serializer that was used. These are saved when using \link{saveRDS}, but will be discarded
if loaded into an incompatible XGBoost version. They are not saved when using XGBoost's
serializers from its public interface including \link{xgb.save} and \link{xgb.save.raw}.
The second ones (R attributes) are not part of the standard XGBoost model structure, and thus are
not saved when using XGBoost's own serializers. These attributes are only used for informational
purposes, such as keeping track of evaluation metrics as the model was fit, or saving the R
call that produced the model, but are otherwise not used for prediction / importance / plotting / etc.
These R attributes are only preserved when using R's serializers.
Note that XGBoost models in R starting from version \verb{2.1.0} and onwards, and XGBoost models
before version \verb{2.1.0}; have a very different R object structure and are incompatible with
each other. Hence, models that were saved with R serializers live \code{saveRDS} or \code{save} before
version \verb{2.1.0} will not work with latter \code{xgboost} versions and vice versa. Be aware that
the structure of R model objects could in theory change again in the future, so XGBoost's serializers
should be preferred for long-term storage.
Furthermore, note that using the package \code{qs} for serialization will require version 0.26 or
higher of said package, and will have the same compatibility restrictions as R serializers.
} }
\details{ \details{
Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into
@ -24,26 +52,29 @@ re-construct the corresponding model. To read the model back, use \code{\link{xg
The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model
as part of another R object. as part of another R object.
Note: Do not use \code{\link{xgb.serialize}} to store models long-term. It persists not only the Use \link{saveRDS} if you require the R-specific attributes that a booster might have, such
model but also internal configurations and parameters, and its format is not stable across as evaluation logs, but note that future compatibility of such objects is outside XGBoost's
multiple XGBoost versions. Use \code{\link{xgb.serialize}} only for checkpointing. control as it relies on R's serialization format (see e.g. the details section in
\link{serialize} and \link{save} from base R).
For more details and explanation about model persistence and archival, consult the page For more details and explanation about model persistence and archival, consult the page
\url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}. \url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}.
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, bst <- xgb.train(data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label),
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
objective = "binary:logistic")
# Save as a stand-alone file; load it with xgb.load() # Save as a stand-alone file; load it with xgb.load()
xgb.save(bst, 'xgb.model') fname <- file.path(tempdir(), "xgb_model.ubj")
bst2 <- xgb.load('xgb.model') xgb.save(bst, fname)
bst2 <- xgb.load(fname)
# Save as a stand-alone file (JSON); load it with xgb.load() # Save as a stand-alone file (JSON); load it with xgb.load()
xgb.save(bst, 'xgb.model.json') fname <- file.path(tempdir(), "xgb_model.json")
bst2 <- xgb.load('xgb.model.json') xgb.save(bst, fname)
if (file.exists('xgb.model.json')) file.remove('xgb.model.json') bst2 <- xgb.load(fname)
# Save as a raw byte vector; load it with xgb.load.raw() # Save as a raw byte vector; load it with xgb.load.raw()
xgb_bytes <- xgb.save.raw(bst) xgb_bytes <- xgb.save.raw(bst)
@ -54,11 +85,11 @@ obj <- list(xgb_model_bytes = xgb.save.raw(bst), description = "My first XGBoost
# Persist the R object. Here, saveRDS() is okay, since it doesn't persist # Persist the R object. Here, saveRDS() is okay, since it doesn't persist
# xgb.Booster directly. What's being persisted is the future-proof byte representation # xgb.Booster directly. What's being persisted is the future-proof byte representation
# as given by xgb.save.raw(). # as given by xgb.save.raw().
saveRDS(obj, 'my_object.rds') fname <- file.path(tempdir(), "my_object.Rds")
saveRDS(obj, fname)
# Read back the R object # Read back the R object
obj2 <- readRDS('my_object.rds') obj2 <- readRDS(fname)
# Re-construct xgb.Booster object from the bytes # Re-construct xgb.Booster object from the bytes
bst2 <- xgb.load.raw(obj2$xgb_model_bytes) bst2 <- xgb.load.raw(obj2$xgb_model_bytes)
if (file.exists('my_object.rds')) file.remove('my_object.rds')
} }

View File

@ -19,15 +19,15 @@ UCI Machine Learning Repository.
This data set includes the following fields: This data set includes the following fields:
\itemize{ \itemize{
\item \code{label} the label for each record \item \code{label} the label for each record
\item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns. \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
} }
} }
\references{ \references{
https://archive.ics.uci.edu/ml/datasets/Mushroom \url{https://archive.ics.uci.edu/ml/datasets/Mushroom}
Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, \url{http://archive.ics.uci.edu/ml}. Irvine, CA: University of California,
School of Information and Computer Science. School of Information and Computer Science.
} }
\keyword{datasets} \keyword{datasets}

View File

@ -19,15 +19,15 @@ UCI Machine Learning Repository.
This data set includes the following fields: This data set includes the following fields:
\itemize{ \itemize{
\item \code{label} the label for each record \item \code{label} the label for each record
\item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns. \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
} }
} }
\references{ \references{
https://archive.ics.uci.edu/ml/datasets/Mushroom \url{https://archive.ics.uci.edu/ml/datasets/Mushroom}
Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, \url{http://archive.ics.uci.edu/ml}. Irvine, CA: University of California,
School of Information and Computer Science. School of Information and Computer Science.
} }
\keyword{datasets} \keyword{datasets}

View File

@ -35,8 +35,6 @@ Callback function expects the following values to be set in its calling frame:
\code{data}, \code{data},
\code{end_iteration}, \code{end_iteration},
\code{params}, \code{params},
\code{num_parallel_tree},
\code{num_class}.
} }
\seealso{ \seealso{
\code{\link{callbacks}} \code{\link{callbacks}}

View File

@ -55,7 +55,6 @@ Callback function expects the following values to be set in its calling frame:
\code{iteration}, \code{iteration},
\code{begin_iteration}, \code{begin_iteration},
\code{end_iteration}, \code{end_iteration},
\code{num_parallel_tree}.
} }
\seealso{ \seealso{
\code{\link{callbacks}}, \code{\link{callbacks}},

View File

@ -4,17 +4,22 @@
\alias{cb.save.model} \alias{cb.save.model}
\title{Callback closure for saving a model file.} \title{Callback closure for saving a model file.}
\usage{ \usage{
cb.save.model(save_period = 0, save_name = "xgboost.model") cb.save.model(save_period = 0, save_name = "xgboost.ubj")
} }
\arguments{ \arguments{
\item{save_period}{save the model to disk after every \item{save_period}{save the model to disk after every
\code{save_period} iterations; 0 means save the model at the end.} \code{save_period} iterations; 0 means save the model at the end.}
\item{save_name}{the name or path for the saved model file. \item{save_name}{the name or path for the saved model file.
It can contain a \code{\link[base]{sprintf}} formatting specifier
to include the integer iteration number in the file name. \if{html}{\out{<div class="sourceCode">}}\preformatted{ Note that the format of the model being saved is determined by the file
E.g., with \code{save_name} = 'xgboost_%04d.model', extension specified here (see \link{xgb.save} for details about how it works).
the file saved at iteration 50 would be named "xgboost_0050.model".}
It can contain a \code{\link[base]{sprintf}} formatting specifier
to include the integer iteration number in the file name.
E.g., with \code{save_name} = 'xgboost_\%04d.ubj',
the file saved at iteration 50 would be named "xgboost_0050.ubj".
}\if{html}{\out{</div>}}}
} }
\description{ \description{
Callback closure for saving a model file. Callback closure for saving a model file.
@ -29,5 +34,7 @@ Callback function expects the following values to be set in its calling frame:
\code{end_iteration}. \code{end_iteration}.
} }
\seealso{ \seealso{
\link{xgb.save}
\code{\link{callbacks}} \code{\link{callbacks}}
} }

View File

@ -0,0 +1,50 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{coef.xgb.Booster}
\alias{coef.xgb.Booster}
\title{Extract coefficients from linear booster}
\usage{
\method{coef}{xgb.Booster}(object, ...)
}
\arguments{
\item{object}{A fitted booster of 'gblinear' type.}
\item{...}{Not used.}
}
\value{
The extracted coefficients:\itemize{
\item If there's only one coefficient per column in the data, will be returned as a
vector, potentially containing the feature names if available, with the intercept
as first column.
\item If there's more than one coefficient per column in the data (e.g. when using
\code{objective="multi:softmax"}), will be returned as a matrix with dimensions equal
to \verb{[num_features, num_cols]}, with the intercepts as first row. Note that the column
(classes in multi-class classification) dimension will not be named.
}
The intercept returned here will include the 'base_score' parameter (unlike the 'bias'
or the last coefficient in the model dump, which doesn't have 'base_score' added to it),
hence one should get the same values from calling \code{predict(..., outputmargin = TRUE)} and
from performing a matrix multiplication with \code{model.matrix(~., ...)}.
Be aware that the coefficients are obtained by first converting them to strings and
back, so there will always be some very small loss of precision compared to the actual
coefficients as used by \link{predict.xgb.Booster}.
}
\description{
Extracts the coefficients from a 'gblinear' booster object,
as produced by \code{xgb.train} when using parameter \code{booster="gblinear"}.
Note: this function will error out if passing a booster model
which is not of "gblinear" type.
}
\examples{
library(xgboost)
data(mtcars)
y <- mtcars[, 1]
x <- as.matrix(mtcars[, -1])
dm <- xgb.DMatrix(data = x, label = y, nthread = 1)
params <- list(booster = "gblinear", nthread = 1)
model <- xgb.train(data = dm, params = params, nrounds = 2)
coef(model)
}

View File

@ -1,36 +1,78 @@
% Generated by roxygen2: do not edit by hand % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R % Please edit documentation in R/xgb.Booster.R, R/xgb.DMatrix.R
\name{getinfo} \name{getinfo.xgb.Booster}
\alias{getinfo.xgb.Booster}
\alias{setinfo.xgb.Booster}
\alias{getinfo} \alias{getinfo}
\alias{getinfo.xgb.DMatrix} \alias{getinfo.xgb.DMatrix}
\title{Get information of an xgb.DMatrix object} \alias{setinfo}
\alias{setinfo.xgb.DMatrix}
\title{Get or set information of xgb.DMatrix and xgb.Booster objects}
\usage{ \usage{
getinfo(object, ...) \method{getinfo}{xgb.Booster}(object, name)
\method{getinfo}{xgb.DMatrix}(object, name, ...) \method{setinfo}{xgb.Booster}(object, name, info)
getinfo(object, name)
\method{getinfo}{xgb.DMatrix}(object, name)
setinfo(object, name, info)
\method{setinfo}{xgb.DMatrix}(object, name, info)
} }
\arguments{ \arguments{
\item{object}{Object of class \code{xgb.DMatrix}} \item{object}{Object of class \code{xgb.DMatrix} or \code{xgb.Booster}.}
\item{...}{other parameters}
\item{name}{the name of the information field to get (see details)} \item{name}{the name of the information field to get (see details)}
\item{info}{the specific field of information to set}
}
\value{
For \code{getinfo}, will return the requested field. For \code{setinfo}, will always return value \code{TRUE}
if it succeeds.
} }
\description{ \description{
Get information of an xgb.DMatrix object Get or set information of xgb.DMatrix and xgb.Booster objects
} }
\details{ \details{
The \code{name} field can be one of the following: The \code{name} field can be one of the following for \code{xgb.DMatrix}:
\itemize{ \itemize{
\item \code{label}: label XGBoost learn from ; \item \code{label}
\item \code{weight}: to do a weight rescale ; \item \code{weight}
\item \code{base_margin}: base margin is the base prediction XGBoost will boost from ; \item \code{base_margin}
\item \code{nrow}: number of rows of the \code{xgb.DMatrix}. \item \code{label_lower_bound}
\item \code{label_upper_bound}
\item \code{group}
\item \code{feature_type}
\item \code{feature_name}
\item \code{nrow}
}
See the documentation for \link{xgb.DMatrix} for more information about these fields.
For \code{xgb.Booster}, can be one of the following:
\itemize{
\item \code{feature_type}
\item \code{feature_name}
} }
\code{group} can be setup by \code{setinfo} but can't be retrieved by \code{getinfo}. Note that, while 'qid' cannot be retrieved, it's possible to get the equivalent 'group'
for a DMatrix that had 'qid' assigned.
\bold{Important}: when calling \code{setinfo}, the objects are modified in-place. See
\link{xgb.copy.Booster} for an idea of how this in-place assignment works. \link{xgb.copy.Booster} for an idea of how this in-place assignment works.
See the documentation for \link{xgb.DMatrix} for possible fields that can be set
(which correspond to arguments in that function).
Note that the following fields are allowed in the construction of an \code{xgb.DMatrix}
but \bold{aren't} allowed here:\itemize{
\item data
\item missing
\item silent
\item nthread
}
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package='xgboost')
@ -41,4 +83,11 @@ setinfo(dtrain, 'label', 1-labels)
labels2 <- getinfo(dtrain, 'label') labels2 <- getinfo(dtrain, 'label')
stopifnot(all(labels2 == 1-labels)) stopifnot(all(labels2 == 1-labels))
data(agaricus.train, package='xgboost')
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
labels <- getinfo(dtrain, 'label')
setinfo(dtrain, 'label', 1-labels)
labels2 <- getinfo(dtrain, 'label')
stopifnot(all.equal(labels2, 1-labels))
} }

View File

@ -2,15 +2,13 @@
% Please edit documentation in R/xgb.Booster.R % Please edit documentation in R/xgb.Booster.R
\name{predict.xgb.Booster} \name{predict.xgb.Booster}
\alias{predict.xgb.Booster} \alias{predict.xgb.Booster}
\alias{predict.xgb.Booster.handle} \title{Predict method for XGBoost model}
\title{Predict method for eXtreme Gradient Boosting model}
\usage{ \usage{
\method{predict}{xgb.Booster}( \method{predict}{xgb.Booster}(
object, object,
newdata, newdata,
missing = NA, missing = NA,
outputmargin = FALSE, outputmargin = FALSE,
ntreelimit = NULL,
predleaf = FALSE, predleaf = FALSE,
predcontrib = FALSE, predcontrib = FALSE,
approxcontrib = FALSE, approxcontrib = FALSE,
@ -21,94 +19,93 @@
strict_shape = FALSE, strict_shape = FALSE,
... ...
) )
\method{predict}{xgb.Booster.handle}(object, ...)
} }
\arguments{ \arguments{
\item{object}{Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}} \item{object}{Object of class \code{xgb.Booster}.}
\item{newdata}{takes \code{matrix}, \code{dgCMatrix}, \code{dgRMatrix}, \code{dsparseVector}, \item{newdata}{Takes \code{matrix}, \code{dgCMatrix}, \code{dgRMatrix}, \code{dsparseVector},
local data file or \code{xgb.DMatrix}. local data file, or \code{xgb.DMatrix}.
For single-row predictions on sparse data, it is recommended to use the CSR format.
If passing a sparse vector, it will take it as a row vector.}
For single-row predictions on sparse data, it's recommended to use CSR format. If passing \item{missing}{Only used when input is a dense matrix. Pick a float value that represents
a sparse vector, it will take it as a row vector.} missing values in data (e.g., 0 or some other extreme value).}
\item{missing}{Missing is only used when input is dense matrix. Pick a float value that represents \item{outputmargin}{Whether the prediction should be returned in the form of original untransformed
missing values in data (e.g., sometimes 0 or some other extreme value is used).}
\item{outputmargin}{whether the prediction should be returned in the for of original untransformed
sum of predictions from boosting iterations' results. E.g., setting \code{outputmargin=TRUE} for sum of predictions from boosting iterations' results. E.g., setting \code{outputmargin=TRUE} for
logistic regression would result in predictions for log-odds instead of probabilities.} logistic regression would return log-odds instead of probabilities.}
\item{ntreelimit}{Deprecated, use \code{iterationrange} instead.} \item{predleaf}{Whether to predict pre-tree leaf indices.}
\item{predleaf}{whether predict leaf index.} \item{predcontrib}{Whether to return feature contributions to individual predictions (see Details).}
\item{predcontrib}{whether to return feature contributions to individual predictions (see Details).} \item{approxcontrib}{Whether to use a fast approximation for feature contributions (see Details).}
\item{approxcontrib}{whether to use a fast approximation for feature contributions (see Details).} \item{predinteraction}{Whether to return contributions of feature interactions to individual predictions (see Details).}
\item{predinteraction}{whether to return contributions of feature interactions to individual predictions (see Details).} \item{reshape}{Whether to reshape the vector of predictions to matrix form when there are several
prediction outputs per case. No effect if \code{predleaf}, \code{predcontrib},
or \code{predinteraction} is \code{TRUE}.}
\item{reshape}{whether to reshape the vector of predictions to a matrix form when there are several \item{training}{Whether the predictions are used for training. For dart booster,
prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
or predinteraction flags is TRUE.}
\item{training}{whether is the prediction result used for training. For dart booster,
training predicting will perform dropout.} training predicting will perform dropout.}
\item{iterationrange}{Specifies which layer of trees are used in prediction. For \item{iterationrange}{Sequence of rounds/iterations from the model to use for prediction, specified by passing
example, if a random forest is trained with 100 rounds. Specifying a two-dimensional vector with the start and end numbers in the sequence (same format as R's \code{seq} - i.e.
`iterationrange=(1, 21)`, then only the forests built during [1, 21) (half open set) base-1 indexing, and inclusive of both ends).
rounds are used in this prediction. It's 1-based index just like R vector. When set
to \code{c(1, 1)} XGBoost will use all trees.}
\item{strict_shape}{Default is \code{FALSE}. When it's set to \code{TRUE}, output \if{html}{\out{<div class="sourceCode">}}\preformatted{ For example, passing `c(1,20)` will predict using the first twenty iterations, while passing `c(1,1)` will
type and shape of prediction are invariant to model type.} predict using only the first one.
\item{...}{Parameters passed to \code{predict.xgb.Booster}} If passing `NULL`, will either stop at the best iteration if the model used early stopping, or use all
of the iterations (rounds) otherwise.
If passing "all", will use all of the rounds regardless of whether the model had early stopping or not.
}\if{html}{\out{</div>}}}
\item{strict_shape}{Default is \code{FALSE}. When set to \code{TRUE}, the output
type and shape of predictions are invariant to the model type.}
\item{...}{Not used.}
} }
\value{ \value{
The return type is different depending whether \code{strict_shape} is set to \code{TRUE}. By default, The return type depends on \code{strict_shape}. If \code{FALSE} (default):
for regression or binary classification, it returns a vector of length \code{nrows(newdata)}. \itemize{
For multiclass classification, either a \code{num_class * nrows(newdata)} vector or \item For regression or binary classification: A vector of length \code{nrows(newdata)}.
a \code{(nrows(newdata), num_class)} dimension matrix is returned, depending on \item For multiclass classification: A vector of length \code{num_class * nrows(newdata)} or
the \code{reshape} value. a \verb{(nrows(newdata), num_class)} matrix, depending on the \code{reshape} value.
\item When \code{predleaf = TRUE}: A matrix with one column per tree.
\item When \code{predcontrib = TRUE}: When not multiclass, a matrix with
\code{ num_features + 1} columns. The last "+ 1" column corresponds to the baseline value.
In the multiclass case, a list of \code{num_class} such matrices.
The contribution values are on the scale of untransformed margin
(e.g., for binary classification, the values are log-odds deviations from the baseline).
\item When \code{predinteraction = TRUE}: When not multiclass, the output is a 3d array of
dimension \code{c(nrow, num_features + 1, num_features + 1)}. The off-diagonal (in the last two dimensions)
elements represent different feature interaction contributions. The array is symmetric WRT the last
two dimensions. The "+ 1" columns corresponds to the baselines. Summing this array along the last dimension should
produce practically the same result as \code{predcontrib = TRUE}.
In the multiclass case, a list of \code{num_class} such arrays.
}
When \code{predleaf = TRUE}, the output is a matrix object with the When \code{strict_shape = TRUE}, the output is always an array:
number of columns corresponding to the number of trees. \itemize{
\item For normal predictions, the output has dimension \verb{(num_class, nrow(newdata))}.
When \code{predcontrib = TRUE} and it is not a multiclass setting, the output is a matrix object with \item For \code{predcontrib = TRUE}, the dimension is \verb{(ncol(newdata) + 1, num_class, nrow(newdata))}.
\code{num_features + 1} columns. The last "+ 1" column in a matrix corresponds to bias. \item For \code{predinteraction = TRUE}, the dimension is \verb{(ncol(newdata) + 1, ncol(newdata) + 1, num_class, nrow(newdata))}.
For a multiclass case, a list of \code{num_class} elements is returned, where each element is \item For \code{predleaf = TRUE}, the dimension is \verb{(n_trees_in_forest, num_class, n_iterations, nrow(newdata))}.
such a matrix. The contribution values are on the scale of untransformed margin }
(e.g., for binary classification would mean that the contributions are log-odds deviations from bias).
When \code{predinteraction = TRUE} and it is not a multiclass setting, the output is a 3d array with
dimensions \code{c(nrow, num_features + 1, num_features + 1)}. The off-diagonal (in the last two dimensions)
elements represent different features interaction contributions. The array is symmetric WRT the last
two dimensions. The "+ 1" columns corresponds to bias. Summing this array along the last dimension should
produce practically the same result as predict with \code{predcontrib = TRUE}.
For a multiclass case, a list of \code{num_class} elements is returned, where each element is
such an array.
When \code{strict_shape} is set to \code{TRUE}, the output is always an array. For
normal prediction, the output is a 2-dimension array \code{(num_class, nrow(newdata))}.
For \code{predcontrib = TRUE}, output is \code{(ncol(newdata) + 1, num_class, nrow(newdata))}
For \code{predinteraction = TRUE}, output is \code{(ncol(newdata) + 1, ncol(newdata) + 1, num_class, nrow(newdata))}
For \code{predleaf = TRUE}, output is \code{(n_trees_in_forest, num_class, n_iterations, nrow(newdata))}
} }
\description{ \description{
Predicted values based on either xgboost model or model handle object. Predicted values based on either xgboost model or model handle object.
} }
\details{ \details{
Note that \code{iterationrange} would currently do nothing for predictions from gblinear, Note that \code{iterationrange} would currently do nothing for predictions from "gblinear",
since gblinear doesn't keep its boosting history. since "gblinear" doesn't keep its boosting history.
One possible practical application of the \code{predleaf} option is to use the model One possible practical application of the \code{predleaf} option is to use the model
as a generator of new features which capture non-linearity and interactions, as a generator of new features which capture non-linearity and interactions,
e.g., as implemented in \code{\link{xgb.create.features}}. e.g., as implemented in \code{\link[=xgb.create.features]{xgb.create.features()}}.
Setting \code{predcontrib = TRUE} allows to calculate contributions of each feature to Setting \code{predcontrib = TRUE} allows to calculate contributions of each feature to
individual predictions. For "gblinear" booster, feature contributions are simply linear terms individual predictions. For "gblinear" booster, feature contributions are simply linear terms
@ -124,14 +121,14 @@ Since it quadratically depends on the number of features, it is recommended to p
of the most important features first. See below about the format of the returned results. of the most important features first. See below about the format of the returned results.
The \code{predict()} method uses as many threads as defined in \code{xgb.Booster} object (all by default). The \code{predict()} method uses as many threads as defined in \code{xgb.Booster} object (all by default).
If you want to change their number, then assign a new number to \code{nthread} using \code{\link{xgb.parameters<-}}. If you want to change their number, assign a new number to \code{nthread} using \code{\link[=xgb.parameters<-]{xgb.parameters<-()}}.
Note also that converting a matrix to \code{\link{xgb.DMatrix}} uses multiple threads too. Note that converting a matrix to \code{\link[=xgb.DMatrix]{xgb.DMatrix()}} uses multiple threads too.
} }
\examples{ \examples{
## binary classification: ## binary classification:
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
data(agaricus.test, package='xgboost') data(agaricus.test, package = "xgboost")
## Keep the number of threads to 2 for examples ## Keep the number of threads to 2 for examples
nthread <- 2 nthread <- 2
@ -140,12 +137,19 @@ data.table::setDTthreads(nthread)
train <- agaricus.train train <- agaricus.train
test <- agaricus.test test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2, bst <- xgb.train(
eta = 0.5, nthread = nthread, nrounds = 5, objective = "binary:logistic") data = xgb.DMatrix(train$data, label = train$label),
max_depth = 2,
eta = 0.5,
nthread = nthread,
nrounds = 5,
objective = "binary:logistic"
)
# use all trees by default # use all trees by default
pred <- predict(bst, test$data) pred <- predict(bst, test$data)
# use only the 1st tree # use only the 1st tree
pred1 <- predict(bst, test$data, iterationrange = c(1, 2)) pred1 <- predict(bst, test$data, iterationrange = c(1, 1))
# Predicting tree leafs: # Predicting tree leafs:
# the result is an nsamples X ntrees matrix # the result is an nsamples X ntrees matrix
@ -173,39 +177,61 @@ par(mar = old_mar)
lb <- as.numeric(iris$Species) - 1 lb <- as.numeric(iris$Species) - 1
num_class <- 3 num_class <- 3
set.seed(11) set.seed(11)
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5, bst <- xgb.train(
objective = "multi:softprob", num_class = num_class) data = xgb.DMatrix(as.matrix(iris[, -5]), label = lb),
max_depth = 4,
eta = 0.5,
nthread = 2,
nrounds = 10,
subsample = 0.5,
objective = "multi:softprob",
num_class = num_class
)
# predict for softmax returns num_class probability numbers per case: # predict for softmax returns num_class probability numbers per case:
pred <- predict(bst, as.matrix(iris[, -5])) pred <- predict(bst, as.matrix(iris[, -5]))
str(pred) str(pred)
# reshape it to a num_class-columns matrix # reshape it to a num_class-columns matrix
pred <- matrix(pred, ncol=num_class, byrow=TRUE) pred <- matrix(pred, ncol = num_class, byrow = TRUE)
# convert the probabilities to softmax labels # convert the probabilities to softmax labels
pred_labels <- max.col(pred) - 1 pred_labels <- max.col(pred) - 1
# the following should result in the same error as seen in the last iteration # the following should result in the same error as seen in the last iteration
sum(pred_labels != lb)/length(lb) sum(pred_labels != lb) / length(lb)
# compare that to the predictions from softmax: # compare with predictions from softmax:
set.seed(11) set.seed(11)
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5, bst <- xgb.train(
objective = "multi:softmax", num_class = num_class) data = xgb.DMatrix(as.matrix(iris[, -5]), label = lb),
max_depth = 4,
eta = 0.5,
nthread = 2,
nrounds = 10,
subsample = 0.5,
objective = "multi:softmax",
num_class = num_class
)
pred <- predict(bst, as.matrix(iris[, -5])) pred <- predict(bst, as.matrix(iris[, -5]))
str(pred) str(pred)
all.equal(pred, pred_labels) all.equal(pred, pred_labels)
# prediction from using only 5 iterations should result # prediction from using only 5 iterations should result
# in the same error as seen in iteration 5: # in the same error as seen in iteration 5:
pred5 <- predict(bst, as.matrix(iris[, -5]), iterationrange=c(1, 6)) pred5 <- predict(bst, as.matrix(iris[, -5]), iterationrange = c(1, 5))
sum(pred5 != lb)/length(lb) sum(pred5 != lb) / length(lb)
} }
\references{ \references{
Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874} \enumerate{
\item Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions",
Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060} NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}
\item Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles",
\url{https://arxiv.org/abs/1706.06060}
}
} }
\seealso{ \seealso{
\code{\link{xgb.train}}. \code{\link[=xgb.train]{xgb.train()}}
} }

View File

@ -4,26 +4,35 @@
\alias{print.xgb.Booster} \alias{print.xgb.Booster}
\title{Print xgb.Booster} \title{Print xgb.Booster}
\usage{ \usage{
\method{print}{xgb.Booster}(x, verbose = FALSE, ...) \method{print}{xgb.Booster}(x, ...)
} }
\arguments{ \arguments{
\item{x}{an xgb.Booster object} \item{x}{An \code{xgb.Booster} object.}
\item{verbose}{whether to print detailed data (e.g., attribute values)} \item{...}{Not used.}
}
\item{...}{not currently used} \value{
The same \code{x} object, returned invisibly
} }
\description{ \description{
Print information about xgb.Booster. Print information about \code{xgb.Booster}.
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
train <- agaricus.train train <- agaricus.train
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") bst <- xgboost(
attr(bst, 'myattr') <- 'memo' data = train$data,
label = train$label,
max_depth = 2,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
attr(bst, "myattr") <- "memo"
print(bst) print(bst)
print(bst, verbose=TRUE)
} }

View File

@ -1,42 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{setinfo}
\alias{setinfo}
\alias{setinfo.xgb.DMatrix}
\title{Set information of an xgb.DMatrix object}
\usage{
setinfo(object, ...)
\method{setinfo}{xgb.DMatrix}(object, name, info, ...)
}
\arguments{
\item{object}{Object of class "xgb.DMatrix"}
\item{...}{other parameters}
\item{name}{the name of the field to get}
\item{info}{the specific field of information to set}
}
\description{
Set information of an xgb.DMatrix object
}
\details{
The \code{name} field can be one of the following:
\itemize{
\item \code{label}: label XGBoost learn from ;
\item \code{weight}: to do a weight rescale ;
\item \code{base_margin}: base margin is the base prediction XGBoost will boost from ;
\item \code{group}: number of rows in each group (to use with \code{rank:pairwise} objective).
}
}
\examples{
data(agaricus.train, package='xgboost')
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
labels <- getinfo(dtrain, 'label')
setinfo(dtrain, 'label', 1-labels)
labels2 <- getinfo(dtrain, 'label')
stopifnot(all.equal(labels2, 1-labels))
}

View File

@ -7,17 +7,15 @@
\title{Get a new DMatrix containing the specified rows of \title{Get a new DMatrix containing the specified rows of
original xgb.DMatrix object} original xgb.DMatrix object}
\usage{ \usage{
slice(object, ...) slice(object, idxset)
\method{slice}{xgb.DMatrix}(object, idxset, ...) \method{slice}{xgb.DMatrix}(object, idxset)
\method{[}{xgb.DMatrix}(object, idxset, colset = NULL) \method{[}{xgb.DMatrix}(object, idxset, colset = NULL)
} }
\arguments{ \arguments{
\item{object}{Object of class "xgb.DMatrix"} \item{object}{Object of class "xgb.DMatrix"}
\item{...}{other parameters (currently not used)}
\item{idxset}{a integer vector of indices of rows needed} \item{idxset}{a integer vector of indices of rows needed}
\item{colset}{currently not used (columns subsetting is not available)} \item{colset}{currently not used (columns subsetting is not available)}

View File

@ -0,0 +1,22 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{variable.names.xgb.Booster}
\alias{variable.names.xgb.Booster}
\title{Get Features Names from Booster}
\usage{
\method{variable.names}{xgb.Booster}(object, ...)
}
\arguments{
\item{object}{An \code{xgb.Booster} object.}
\item{...}{Not used.}
}
\description{
Returns the feature / variable / column names from a fitted
booster object, which are set automatically during the call to \link{xgb.train}
from the DMatrix names, or which can be set manually through \link{setinfo}.
If the object doesn't have feature names, will return \code{NULL}.
It is equivalent to calling \code{getinfo(object, "feature_name")}.
}

View File

@ -1,52 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{xgb.Booster.complete}
\alias{xgb.Booster.complete}
\title{Restore missing parts of an incomplete xgb.Booster object.}
\usage{
xgb.Booster.complete(object, saveraw = TRUE)
}
\arguments{
\item{object}{object of class \code{xgb.Booster}}
\item{saveraw}{a flag indicating whether to append \code{raw} Booster memory dump data
when it doesn't already exist.}
}
\value{
An object of \code{xgb.Booster} class.
}
\description{
It attempts to complete an \code{xgb.Booster} object by restoring either its missing
raw model memory dump (when it has no \code{raw} data but its \code{xgb.Booster.handle} is valid)
or its missing internal handle (when its \code{xgb.Booster.handle} is not valid
but it has a raw Booster memory dump).
}
\details{
While this method is primarily for internal use, it might be useful in some practical situations.
E.g., when an \code{xgb.Booster} model is saved as an R object and then is loaded as an R object,
its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods
should still work for such a model object since those methods would be using
\code{xgb.Booster.complete} internally. However, one might find it to be more efficient to call the
\code{xgb.Booster.complete} function explicitly once after loading a model as an R-object.
That would prevent further repeated implicit reconstruction of an internal booster model.
}
\examples{
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
saveRDS(bst, "xgb.model.rds")
# Warning: The resulting RDS file is only compatible with the current XGBoost version.
# Refer to the section titled "a-compatibility-note-for-saveRDS-save".
bst1 <- readRDS("xgb.model.rds")
if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
# the handle is invalid:
print(bst1$handle)
bst1 <- xgb.Booster.complete(bst1)
# now the handle points to a valid internal booster model:
print(bst1$handle)
}

View File

@ -6,36 +6,86 @@
\usage{ \usage{
xgb.DMatrix( xgb.DMatrix(
data, data,
info = list(), label = NULL,
weight = NULL,
base_margin = NULL,
missing = NA, missing = NA,
silent = FALSE, silent = FALSE,
feature_names = colnames(data),
nthread = NULL, nthread = NULL,
... group = NULL,
qid = NULL,
label_lower_bound = NULL,
label_upper_bound = NULL,
feature_weights = NULL,
enable_categorical = FALSE
) )
} }
\arguments{ \arguments{
\item{data}{a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, \item{data}{a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object,
a \code{dgRMatrix} object (only when making predictions from a fitted model), a \code{dgRMatrix} object,
a \code{dsparseVector} object (only when making predictions from a fitted model, will be a \code{dsparseVector} object (only when making predictions from a fitted model, will be
interpreted as a row vector), or a character string representing a filename.} interpreted as a row vector), or a character string representing a filename.}
\item{info}{a named list of additional information to store in the \code{xgb.DMatrix} object. \item{label}{Label of the training data.}
See \code{\link{setinfo}} for the specific allowed kinds of}
\item{weight}{Weight for each instance.
Note that, for ranking task, weights are per-group. In ranking task, one weight
is assigned to each group (not each data point). This is because we
only care about the relative ordering of data points within each group,
so it doesn't make sense to assign weights to individual data points.}
\item{base_margin}{Base margin used for boosting from existing model.
\if{html}{\out{<div class="sourceCode">}}\preformatted{ In the case of multi-output models, one can also pass multi-dimensional base_margin.
}\if{html}{\out{</div>}}}
\item{missing}{a float value to represents missing values in data (used only when input is a dense matrix). \item{missing}{a float value to represents missing values in data (used only when input is a dense matrix).
It is useful when a 0 or some other extreme value represents missing values in data.} It is useful when a 0 or some other extreme value represents missing values in data.}
\item{silent}{whether to suppress printing an informational message after loading from a file.} \item{silent}{whether to suppress printing an informational message after loading from a file.}
\item{feature_names}{Set names for features. Overrides column names in data
frame and matrix.}
\item{nthread}{Number of threads used for creating DMatrix.} \item{nthread}{Number of threads used for creating DMatrix.}
\item{...}{the \code{info} data could be passed directly as parameters, without creating an \code{info} list.} \item{group}{Group size for all ranking group.}
\item{qid}{Query ID for data samples, used for ranking.}
\item{label_lower_bound}{Lower bound for survival training.}
\item{label_upper_bound}{Upper bound for survival training.}
\item{feature_weights}{Set feature weights for column sampling.}
\item{enable_categorical}{Experimental support of specializing for categorical features.
\if{html}{\out{<div class="sourceCode">}}\preformatted{ If passing 'TRUE' and 'data' is a data frame,
columns of categorical types will automatically
be set to be of categorical type (feature_type='c') in the resulting DMatrix.
If passing 'FALSE' and 'data' is a data frame with categorical columns,
it will result in an error being thrown.
If 'data' is not a data frame, this argument is ignored.
JSON/UBJSON serialization format is required for this.
}\if{html}{\out{</div>}}}
} }
\description{ \description{
Construct xgb.DMatrix object from either a dense matrix, a sparse matrix, or a local file. Construct xgb.DMatrix object from either a dense matrix, a sparse matrix, or a local file.
Supported input file formats are either a LIBSVM text file or a binary file that was created previously by Supported input file formats are either a LIBSVM text file or a binary file that was created previously by
\code{\link{xgb.DMatrix.save}}). \code{\link{xgb.DMatrix.save}}).
} }
\details{
Note that DMatrix objects are not serializable through R functions such as \code{saveRDS} or \code{save}.
If a DMatrix gets serialized and then de-serialized (for example, when saving data in an R session or caching
chunks in an Rmd file), the resulting object will not be usable anymore and will need to be reconstructed
from the original source of data.
}
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package='xgboost')
## Keep the number of threads to 1 for examples ## Keep the number of threads to 1 for examples
@ -44,7 +94,7 @@ data.table::setDTthreads(nthread)
dtrain <- with( dtrain <- with(
agaricus.train, xgb.DMatrix(data, label = label, nthread = nthread) agaricus.train, xgb.DMatrix(data, label = label, nthread = nthread)
) )
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data') fname <- file.path(tempdir(), "xgb.DMatrix.data")
dtrain <- xgb.DMatrix('xgb.DMatrix.data') xgb.DMatrix.save(dtrain, fname)
if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data') dtrain <- xgb.DMatrix(fname)
} }

View File

@ -0,0 +1,32 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{xgb.DMatrix.hasinfo}
\alias{xgb.DMatrix.hasinfo}
\title{Check whether DMatrix object has a field}
\usage{
xgb.DMatrix.hasinfo(object, info)
}
\arguments{
\item{object}{The DMatrix object to check for the given \code{info} field.}
\item{info}{The field to check for presence or absence in \code{object}.}
}
\description{
Checks whether an xgb.DMatrix object has a given field assigned to
it, such as weights, labels, etc.
}
\examples{
library(xgboost)
x <- matrix(1:10, nrow = 5)
dm <- xgb.DMatrix(x, nthread = 1)
# 'dm' so far doesn't have any fields set
xgb.DMatrix.hasinfo(dm, "label")
# Fields can be added after construction
setinfo(dm, "label", 1:5)
xgb.DMatrix.hasinfo(dm, "label")
}
\seealso{
\link{xgb.DMatrix}, \link{getinfo.xgb.DMatrix}, \link{setinfo.xgb.DMatrix}
}

View File

@ -17,7 +17,7 @@ Save xgb.DMatrix object to binary file
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package='xgboost')
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2)) dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data') fname <- file.path(tempdir(), "xgb.DMatrix.data")
dtrain <- xgb.DMatrix('xgb.DMatrix.data') xgb.DMatrix.save(dtrain, fname)
if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data') dtrain <- xgb.DMatrix(fname)
} }

View File

@ -5,7 +5,7 @@
\alias{xgb.attr<-} \alias{xgb.attr<-}
\alias{xgb.attributes} \alias{xgb.attributes}
\alias{xgb.attributes<-} \alias{xgb.attributes<-}
\title{Accessors for serializable attributes of a model.} \title{Accessors for serializable attributes of a model}
\usage{ \usage{
xgb.attr(object, name) xgb.attr(object, name)
@ -16,64 +16,71 @@ xgb.attributes(object)
xgb.attributes(object) <- value xgb.attributes(object) <- value
} }
\arguments{ \arguments{
\item{object}{Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}.} \item{object}{Object of class \code{xgb.Booster}. \bold{Will be modified in-place} when assigning to it.}
\item{name}{a non-empty character string specifying which attribute is to be accessed.} \item{name}{A non-empty character string specifying which attribute is to be accessed.}
\item{value}{a value of an attribute for \code{xgb.attr<-}; for \code{xgb.attributes<-} \item{value}{For \verb{xgb.attr<-}, a value of an attribute; for \verb{xgb.attributes<-},
it's a list (or an object coercible to a list) with the names of attributes to set it is a list (or an object coercible to a list) with the names of attributes to set
and the elements corresponding to attribute values. and the elements corresponding to attribute values.
Non-character values are converted to character. Non-character values are converted to character.
When attribute value is not a scalar, only the first index is used. When an attribute value is not a scalar, only the first index is used.
Use \code{NULL} to remove an attribute.} Use \code{NULL} to remove an attribute.}
} }
\value{ \value{
\code{xgb.attr} returns either a string value of an attribute \itemize{
\item \code{xgb.attr()} returns either a string value of an attribute
or \code{NULL} if an attribute wasn't stored in a model. or \code{NULL} if an attribute wasn't stored in a model.
\item \code{xgb.attributes()} returns a list of all attributes stored in a model
\code{xgb.attributes} returns a list of all attribute stored in a model
or \code{NULL} if a model has no stored attributes. or \code{NULL} if a model has no stored attributes.
} }
}
\description{ \description{
These methods allow to manipulate the key-value attribute strings of an xgboost model. These methods allow to manipulate the key-value attribute strings of an xgboost model.
} }
\details{ \details{
The primary purpose of xgboost model attributes is to store some meta-data about the model. The primary purpose of xgboost model attributes is to store some meta data about the model.
Note that they are a separate concept from the object attributes in R. Note that they are a separate concept from the object attributes in R.
Specifically, they refer to key-value strings that can be attached to an xgboost model, Specifically, they refer to key-value strings that can be attached to an xgboost model,
stored together with the model's binary representation, and accessed later stored together with the model's binary representation, and accessed later
(from R or any other interface). (from R or any other interface).
In contrast, any R-attribute assigned to an R-object of \code{xgb.Booster} class In contrast, any R attribute assigned to an R object of \code{xgb.Booster} class
would not be saved by \code{xgb.save} because an xgboost model is an external memory object would not be saved by \code{\link[=xgb.save]{xgb.save()}} because an xgboost model is an external memory object
and its serialization is handled externally. and its serialization is handled externally.
Also, setting an attribute that has the same name as one of xgboost's parameters wouldn't Also, setting an attribute that has the same name as one of xgboost's parameters wouldn't
change the value of that parameter for a model. change the value of that parameter for a model.
Use \code{\link{xgb.parameters<-}} to set or change model parameters. Use \code{\link[=xgb.parameters<-]{xgb.parameters<-()}} to set or change model parameters.
The attribute setters would usually work more efficiently for \code{xgb.Booster.handle} The \verb{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
than for \code{xgb.Booster}, since only just a handle (pointer) would need to be copied.
That would only matter if attributes need to be set many times.
Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
and it would be user's responsibility to call \code{xgb.serialize} to update it.
The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
but it doesn't delete the other existing attributes. but it doesn't delete the other existing attributes.
Important: since this modifies the booster's C object, semantics for assignment here
will differ from R's, as any object reference to the same booster will be modified
too, while assignment of R attributes through \verb{attributes(model)$<attr> <- <value>}
will follow the usual copy-on-write R semantics (see \link{xgb.copy.Booster} for an
example of these behaviors).
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
train <- agaricus.train train <- agaricus.train
bst <- xgboost(data = train$data, label = train$label, max_depth = 2, bst <- xgboost(
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") data = train$data,
label = train$label,
max_depth = 2,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
xgb.attr(bst, "my_attribute") <- "my attribute value" xgb.attr(bst, "my_attribute") <- "my attribute value"
print(xgb.attr(bst, "my_attribute")) print(xgb.attr(bst, "my_attribute"))
xgb.attributes(bst) <- list(a = 123, b = "abc") xgb.attributes(bst) <- list(a = 123, b = "abc")
xgb.save(bst, 'xgb.model') fname <- file.path(tempdir(), "xgb.ubj")
bst1 <- xgb.load('xgb.model') xgb.save(bst, fname)
if (file.exists('xgb.model')) file.remove('xgb.model') bst1 <- xgb.load(fname)
print(xgb.attr(bst1, "my_attribute")) print(xgb.attr(bst1, "my_attribute"))
print(xgb.attributes(bst1)) print(xgb.attributes(bst1))

View File

@ -3,31 +3,48 @@
\name{xgb.config} \name{xgb.config}
\alias{xgb.config} \alias{xgb.config}
\alias{xgb.config<-} \alias{xgb.config<-}
\title{Accessors for model parameters as JSON string.} \title{Accessors for model parameters as JSON string}
\usage{ \usage{
xgb.config(object) xgb.config(object)
xgb.config(object) <- value xgb.config(object) <- value
} }
\arguments{ \arguments{
\item{object}{Object of class \code{xgb.Booster}} \item{object}{Object of class \code{xgb.Booster}. \bold{Will be modified in-place} when assigning to it.}
\item{value}{A JSON string.} \item{value}{An R list.}
}
\value{
\code{xgb.config} will return the parameters as an R list.
} }
\description{ \description{
Accessors for model parameters as JSON string. Accessors for model parameters as JSON string
}
\details{
Note that assignment is performed in-place on the booster C object, which unlike assignment
of R attributes, doesn't follow typical copy-on-write semantics for assignment - i.e. all references
to the same booster will also get updated.
See \link{xgb.copy.Booster} for an example of this behavior.
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
## Keep the number of threads to 1 for examples ## Keep the number of threads to 1 for examples
nthread <- 1 nthread <- 1
data.table::setDTthreads(nthread) data.table::setDTthreads(nthread)
train <- agaricus.train train <- agaricus.train
bst <- xgboost( bst <- xgboost(
data = train$data, label = train$label, max_depth = 2, data = train$data,
eta = 1, nthread = nthread, nrounds = 2, objective = "binary:logistic" label = train$label,
max_depth = 2,
eta = 1,
nthread = nthread,
nrounds = 2,
objective = "binary:logistic"
) )
config <- xgb.config(bst) config <- xgb.config(bst)
} }

View File

@ -0,0 +1,53 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{xgb.copy.Booster}
\alias{xgb.copy.Booster}
\title{Deep-copies a Booster Object}
\usage{
xgb.copy.Booster(model)
}
\arguments{
\item{model}{An 'xgb.Booster' object.}
}
\value{
A deep copy of \code{model} - it will be identical in every way, but C-level
functions called on that copy will not affect the \code{model} variable.
}
\description{
Creates a deep copy of an 'xgb.Booster' object, such that the
C object pointer contained will be a different object, and hence functions
like \link{xgb.attr} will not affect the object from which it was copied.
}
\examples{
library(xgboost)
data(mtcars)
y <- mtcars$mpg
x <- mtcars[, -1]
dm <- xgb.DMatrix(x, label = y, nthread = 1)
model <- xgb.train(
data = dm,
params = list(nthread = 1),
nround = 3
)
# Set an arbitrary attribute kept at the C level
xgb.attr(model, "my_attr") <- 100
print(xgb.attr(model, "my_attr"))
# Just assigning to a new variable will not create
# a deep copy - C object pointer is shared, and in-place
# modifications will affect both objects
model_shallow_copy <- model
xgb.attr(model_shallow_copy, "my_attr") <- 333
# 'model' was also affected by this change:
print(xgb.attr(model, "my_attr"))
model_deep_copy <- xgb.copy.Booster(model)
xgb.attr(model_deep_copy, "my_attr") <- 444
# 'model' was NOT affected by this change
# (keeps previous value that was assigned before)
print(xgb.attr(model, "my_attr"))
# Verify that the new object was actually modified
print(xgb.attr(model_deep_copy, "my_attr"))
}

View File

@ -48,7 +48,7 @@ be the binary vector \code{[0, 1, 0, 1, 0]}, where the first 3 entries
correspond to the leaves of the first subtree and last 2 to correspond to the leaves of the first subtree and last 2 to
those of the second subtree. those of the second subtree.
[...] \link{...}
We can understand boosted decision tree We can understand boosted decision tree
based transformation as a supervised feature encoding that based transformation as a supervised feature encoding that
@ -62,7 +62,7 @@ data(agaricus.test, package='xgboost')
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2)) dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
dtest <- with(agaricus.test, xgb.DMatrix(data, label = label, nthread = 2)) dtest <- with(agaricus.test, xgb.DMatrix(data, label = label, nthread = 2))
param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic') param <- list(max_depth=2, eta=1, objective='binary:logistic')
nrounds = 4 nrounds = 4
bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2) bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)

View File

@ -29,22 +29,22 @@ xgb.cv(
} }
\arguments{ \arguments{
\item{params}{the list of parameters. The complete list of parameters is \item{params}{the list of parameters. The complete list of parameters is
available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
is a shorter summary: is a shorter summary:
\itemize{ \itemize{
\item \code{objective} objective function, common ones are \item \code{objective} objective function, common ones are
\itemize{ \itemize{
\item \code{reg:squarederror} Regression with squared loss. \item \code{reg:squarederror} Regression with squared loss.
\item \code{binary:logistic} logistic regression for classification. \item \code{binary:logistic} logistic regression for classification.
\item See \code{\link[=xgb.train]{xgb.train}()} for complete list of objectives. \item See \code{\link[=xgb.train]{xgb.train}()} for complete list of objectives.
} }
\item \code{eta} step size of each boosting step \item \code{eta} step size of each boosting step
\item \code{max_depth} maximum depth of the tree \item \code{max_depth} maximum depth of the tree
\item \code{nthread} number of thread used in training, if not set, all threads are used \item \code{nthread} number of thread used in training, if not set, all threads are used
} }
See \code{\link{xgb.train}} for further details. See \code{\link{xgb.train}} for further details.
See also demo/ for walkthrough example in R.} See also demo/ for walkthrough example in R.}
\item{data}{takes an \code{xgb.DMatrix}, \code{matrix}, or \code{dgCMatrix} as the input.} \item{data}{takes an \code{xgb.DMatrix}, \code{matrix}, or \code{dgCMatrix} as the input.}
@ -64,17 +64,17 @@ from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callb
\item{showsd}{\code{boolean}, whether to show standard deviation of cross validation} \item{showsd}{\code{boolean}, whether to show standard deviation of cross validation}
\item{metrics, }{list of evaluation metrics to be used in cross validation, \item{metrics, }{list of evaluation metrics to be used in cross validation,
when it is not specified, the evaluation metric is chosen according to objective function. when it is not specified, the evaluation metric is chosen according to objective function.
Possible options are: Possible options are:
\itemize{ \itemize{
\item \code{error} binary classification error rate \item \code{error} binary classification error rate
\item \code{rmse} Rooted mean square error \item \code{rmse} Rooted mean square error
\item \code{logloss} negative log-likelihood function \item \code{logloss} negative log-likelihood function
\item \code{mae} Mean absolute error \item \code{mae} Mean absolute error
\item \code{mape} Mean absolute percentage error \item \code{mape} Mean absolute percentage error
\item \code{auc} Area under curve \item \code{auc} Area under curve
\item \code{aucpr} Area under PR curve \item \code{aucpr} Area under PR curve
\item \code{merror} Exact matching error, used to evaluate multi-class classification \item \code{merror} Exact matching error, used to evaluate multi-class classification
}} }}
\item{obj}{customized objective function. Returns gradient and second order \item{obj}{customized objective function. Returns gradient and second order
@ -120,26 +120,25 @@ to customize the training process.}
\value{ \value{
An object of class \code{xgb.cv.synchronous} with the following elements: An object of class \code{xgb.cv.synchronous} with the following elements:
\itemize{ \itemize{
\item \code{call} a function call. \item \code{call} a function call.
\item \code{params} parameters that were passed to the xgboost library. Note that it does not \item \code{params} parameters that were passed to the xgboost library. Note that it does not
capture parameters changed by the \code{\link{cb.reset.parameters}} callback. capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
\item \code{callbacks} callback functions that were either automatically assigned or \item \code{callbacks} callback functions that were either automatically assigned or
explicitly passed. explicitly passed.
\item \code{evaluation_log} evaluation history stored as a \code{data.table} with the \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
first column corresponding to iteration number and the rest corresponding to the first column corresponding to iteration number and the rest corresponding to the
CV-based evaluation means and standard deviations for the training and test CV-sets. CV-based evaluation means and standard deviations for the training and test CV-sets.
It is created by the \code{\link{cb.evaluation.log}} callback. It is created by the \code{\link{cb.evaluation.log}} callback.
\item \code{niter} number of boosting iterations. \item \code{niter} number of boosting iterations.
\item \code{nfeatures} number of features in training data. \item \code{nfeatures} number of features in training data.
\item \code{folds} the list of CV folds' indices - either those passed through the \code{folds} \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
parameter or randomly generated. parameter or randomly generated.
\item \code{best_iteration} iteration number with the best evaluation metric value \item \code{best_iteration} iteration number with the best evaluation metric value
(only available with early stopping). (only available with early stopping).
\item \code{best_ntreelimit} and the \code{ntreelimit} Deprecated attributes, use \code{best_iteration} instead. \item \code{pred} CV prediction values available when \code{prediction} is set.
\item \code{pred} CV prediction values available when \code{prediction} is set. It is either vector or matrix (see \code{\link{cb.cv.predict}}).
It is either vector or matrix (see \code{\link{cb.cv.predict}}). \item \code{models} a list of the CV folds' models. It is only available with the explicit
\item \code{models} a list of the CV folds' models. It is only available with the explicit setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
} }
} }
\description{ \description{

View File

@ -9,7 +9,7 @@ xgb.dump(
fname = NULL, fname = NULL,
fmap = "", fmap = "",
with_stats = FALSE, with_stats = FALSE,
dump_format = c("text", "json"), dump_format = c("text", "json", "dot"),
... ...
) )
} }
@ -29,7 +29,10 @@ When this option is on, the model dump contains two additional values:
gain is the approximate loss function gain we get in each split; gain is the approximate loss function gain we get in each split;
cover is the sum of second order gradient in each node.} cover is the sum of second order gradient in each node.}
\item{dump_format}{either 'text' or 'json' format could be specified.} \item{dump_format}{either 'text', 'json', or 'dot' (graphviz) format could be specified.
Format 'dot' for a single tree can be passed directly to packages that consume this format
for graph visualization, such as function \code{\link[DiagrammeR:grViz]{DiagrammeR::grViz()}}}
\item{...}{currently not used} \item{...}{currently not used}
} }
@ -57,4 +60,8 @@ print(xgb.dump(bst, with_stats = TRUE))
# print in JSON format: # print in JSON format:
cat(xgb.dump(bst, with_stats = TRUE, dump_format='json')) cat(xgb.dump(bst, with_stats = TRUE, dump_format='json'))
# plot first tree leveraging the 'dot' format
if (requireNamespace('DiagrammeR', quietly = TRUE)) {
DiagrammeR::grViz(xgb.dump(bst, dump_format = "dot")[[1L]])
}
} }

View File

@ -8,7 +8,8 @@ xgb.gblinear.history(model, class_index = NULL)
} }
\arguments{ \arguments{
\item{model}{either an \code{xgb.Booster} or a result of \code{xgb.cv()}, trained \item{model}{either an \code{xgb.Booster} or a result of \code{xgb.cv()}, trained
using the \code{cb.gblinear.history()} callback.} using the \code{cb.gblinear.history()} callback, but \bold{not} a booster
loaded from \link{xgb.load} or \link{xgb.load.raw}.}
\item{class_index}{zero-based class index to extract the coefficients for only that \item{class_index}{zero-based class index to extract the coefficients for only that
specific class in a multinomial multiclass model. When it is NULL, all the specific class in a multinomial multiclass model. When it is NULL, all the
@ -27,3 +28,11 @@ A helper function to extract the matrix of linear coefficients' history
from a gblinear model created while using the \code{cb.gblinear.history()} from a gblinear model created while using the \code{cb.gblinear.history()}
callback. callback.
} }
\details{
Note that this is an R-specific function that relies on R attributes that
are not saved when using xgboost's own serialization functions like \link{xgb.load}
or \link{xgb.load.raw}.
In order for a serialized model to be accepted by tgis function, one must use R
serializers such as \link{saveRDS}.
}

View File

@ -0,0 +1,19 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{xgb.get.DMatrix.data}
\alias{xgb.get.DMatrix.data}
\title{Get DMatrix Data}
\usage{
xgb.get.DMatrix.data(dmat)
}
\arguments{
\item{dmat}{An \code{xgb.DMatrix} object, as returned by \link{xgb.DMatrix}.}
}
\value{
The data held in the DMatrix, as a sparse CSR matrix (class \code{dgRMatrix}
from package \code{Matrix}). If it had feature names, these will be added as column names
in the output.
}
\description{
Get DMatrix Data
}

View File

@ -0,0 +1,17 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{xgb.get.DMatrix.num.non.missing}
\alias{xgb.get.DMatrix.num.non.missing}
\title{Get Number of Non-Missing Entries in DMatrix}
\usage{
xgb.get.DMatrix.num.non.missing(dmat)
}
\arguments{
\item{dmat}{An \code{xgb.DMatrix} object, as returned by \link{xgb.DMatrix}.}
}
\value{
The number of non-missing entries in the DMatrix
}
\description{
Get Number of Non-Missing Entries in DMatrix
}

View File

@ -0,0 +1,58 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{xgb.get.DMatrix.qcut}
\alias{xgb.get.DMatrix.qcut}
\title{Get Quantile Cuts from DMatrix}
\usage{
xgb.get.DMatrix.qcut(dmat, output = c("list", "arrays"))
}
\arguments{
\item{dmat}{An \code{xgb.DMatrix} object, as returned by \link{xgb.DMatrix}.}
\item{output}{Output format for the quantile cuts. Possible options are:\itemize{
\item \code{"list"} will return the output as a list with one entry per column, where
each column will have a numeric vector with the cuts. The list will be named if
\code{dmat} has column names assigned to it.
\item \code{"arrays"} will return a list with entries \code{indptr} (base-0 indexing) and
\code{data}. Here, the cuts for column 'i' are obtained by slicing 'data' from entries
\code{indptr[i]+1} to \code{indptr[i+1]}.
}}
}
\value{
The quantile cuts, in the format specified by parameter \code{output}.
}
\description{
Get the quantile cuts (a.k.a. borders) from an \code{xgb.DMatrix}
that has been quantized for the histogram method (\code{tree_method="hist"}).
These cuts are used in order to assign observations to bins - i.e. these are ordered
boundaries which are used to determine assignment condition \verb{border_low < x < border_high}.
As such, the first and last bin will be outside of the range of the data, so as to include
all of the observations there.
If a given column has 'n' bins, then there will be 'n+1' cuts / borders for that column,
which will be output in sorted order from lowest to highest.
Different columns can have different numbers of bins according to their range.
}
\examples{
library(xgboost)
data(mtcars)
y <- mtcars$mpg
x <- as.matrix(mtcars[, -1])
dm <- xgb.DMatrix(x, label = y, nthread = 1)
# DMatrix is not quantized right away, but will be once a hist model is generated
model <- xgb.train(
data = dm,
params = list(
tree_method = "hist",
max_bin = 8,
nthread = 1
),
nrounds = 3
)
# Now can get the quantile cuts
xgb.get.DMatrix.qcut(dm)
}

View File

@ -0,0 +1,25 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{xgb.get.num.boosted.rounds}
\alias{xgb.get.num.boosted.rounds}
\alias{length.xgb.Booster}
\title{Get number of boosting rounds in a fitted booster}
\usage{
xgb.get.num.boosted.rounds(model)
\method{length}{xgb.Booster}(x)
}
\arguments{
\item{model, x}{A fitted \code{xgb.Booster} model.}
}
\value{
The number of rounds saved in the model, as an integer.
}
\description{
Get number of boosting rounds in a fitted booster
}
\details{
Note that setting booster parameters related to training
continuation / updates through \link{xgb.parameters<-} will reset the
number of rounds to zero.
}

View File

@ -2,11 +2,11 @@
% Please edit documentation in R/xgb.importance.R % Please edit documentation in R/xgb.importance.R
\name{xgb.importance} \name{xgb.importance}
\alias{xgb.importance} \alias{xgb.importance}
\title{Importance of features in a model.} \title{Feature importance}
\usage{ \usage{
xgb.importance( xgb.importance(
feature_names = NULL,
model = NULL, model = NULL,
feature_names = getinfo(model, "feature_name"),
trees = NULL, trees = NULL,
data = NULL, data = NULL,
label = NULL, label = NULL,
@ -14,88 +14,126 @@ xgb.importance(
) )
} }
\arguments{ \arguments{
\item{feature_names}{character vector of feature names. If the model already \item{model}{Object of class \code{xgb.Booster}.}
contains feature names, those would be used when \code{feature_names=NULL} (default value).
Non-null \code{feature_names} could be provided to override those in the model.}
\item{model}{object of class \code{xgb.Booster}.} \item{feature_names}{Character vector used to overwrite the feature names
of the model. The default is \code{NULL} (use original feature names).}
\item{trees}{(only for the gbtree booster) an integer vector of tree indices that should be included \item{trees}{An integer vector of tree indices that should be included
into the importance calculation. If set to \code{NULL}, all trees of the model are parsed. into the importance calculation (only for the "gbtree" booster).
The default (\code{NULL}) parses all trees.
It could be useful, e.g., in multiclass classification to get feature importances It could be useful, e.g., in multiclass classification to get feature importances
for each class separately. IMPORTANT: the tree index in xgboost models for each class separately. \emph{Important}: the tree index in XGBoost models
is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).} is zero-based (e.g., use \code{trees = 0:4} for the first five trees).}
\item{data}{deprecated.} \item{data}{Deprecated.}
\item{label}{deprecated.} \item{label}{Deprecated.}
\item{target}{deprecated.} \item{target}{Deprecated.}
} }
\value{ \value{
For a tree model, a \code{data.table} with the following columns: A \code{data.table} with the following columns:
For a tree model:
\itemize{ \itemize{
\item \code{Features} names of the features used in the model; \item \code{Features}: Names of the features used in the model.
\item \code{Gain} represents fractional contribution of each feature to the model based on \item \code{Gain}: Fractional contribution of each feature to the model based on
the total gain of this feature's splits. Higher percentage means a more important the total gain of this feature's splits. Higher percentage means higher importance.
predictive feature. \item \code{Cover}: Metric of the number of observation related to this feature.
\item \code{Cover} metric of the number of observation related to this feature; \item \code{Frequency}: Percentage of times a feature has been used in trees.
\item \code{Frequency} percentage representing the relative number of times
a feature have been used in trees.
} }
A linear model's importance \code{data.table} has the following columns: For a linear model:
\itemize{ \itemize{
\item \code{Features} names of the features used in the model; \item \code{Features}: Names of the features used in the model.
\item \code{Weight} the linear coefficient of this feature; \item \code{Weight}: Linear coefficient of this feature.
\item \code{Class} (only for multiclass models) class label. \item \code{Class}: Class label (only for multiclass models).
} }
If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names}, If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names},
index of the features will be used instead. Because the index is extracted from the model dump the index of the features will be used instead. Because the index is extracted from the model dump
(based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R). (based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R).
} }
\description{ \description{
Creates a \code{data.table} of feature importances in a model. Creates a \code{data.table} of feature importances.
} }
\details{ \details{
This function works for both linear and tree models. This function works for both linear and tree models.
For linear models, the importance is the absolute magnitude of linear coefficients. For linear models, the importance is the absolute magnitude of linear coefficients.
For that reason, in order to obtain a meaningful ranking by importance for a linear model, To obtain a meaningful ranking by importance for linear models, the features need to
the features need to be on the same scale (which you also would want to do when using either be on the same scale (which is also recommended when using L1 or L2 regularization).
L1 or L2 regularization).
} }
\examples{ \examples{
# binomial classification using gbtree: # binomial classification using "gbtree":
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") bst <- xgboost(
data = agaricus.train$data,
label = agaricus.train$label,
max_depth = 2,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
xgb.importance(model = bst) xgb.importance(model = bst)
# binomial classification using gblinear: # binomial classification using "gblinear":
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, booster = "gblinear", bst <- xgboost(
eta = 0.3, nthread = 1, nrounds = 20, objective = "binary:logistic") data = agaricus.train$data,
label = agaricus.train$label,
booster = "gblinear",
eta = 0.3,
nthread = 1,
nrounds = 20, objective = "binary:logistic"
)
xgb.importance(model = bst) xgb.importance(model = bst)
# multiclass classification using gbtree: # multiclass classification using "gbtree":
nclass <- 3 nclass <- 3
nrounds <- 10 nrounds <- 10
mbst <- xgboost(data = as.matrix(iris[, -5]), label = as.numeric(iris$Species) - 1, mbst <- xgboost(
max_depth = 3, eta = 0.2, nthread = 2, nrounds = nrounds, data = as.matrix(iris[, -5]),
objective = "multi:softprob", num_class = nclass) label = as.numeric(iris$Species) - 1,
max_depth = 3,
eta = 0.2,
nthread = 2,
nrounds = nrounds,
objective = "multi:softprob",
num_class = nclass
)
# all classes clumped together: # all classes clumped together:
xgb.importance(model = mbst) xgb.importance(model = mbst)
# inspect importances separately for each class:
xgb.importance(model = mbst, trees = seq(from=0, by=nclass, length.out=nrounds))
xgb.importance(model = mbst, trees = seq(from=1, by=nclass, length.out=nrounds))
xgb.importance(model = mbst, trees = seq(from=2, by=nclass, length.out=nrounds))
# multiclass classification using gblinear: # inspect importances separately for each class:
mbst <- xgboost(data = scale(as.matrix(iris[, -5])), label = as.numeric(iris$Species) - 1, xgb.importance(
booster = "gblinear", eta = 0.2, nthread = 1, nrounds = 15, model = mbst, trees = seq(from = 0, by = nclass, length.out = nrounds)
objective = "multi:softprob", num_class = nclass) )
xgb.importance(
model = mbst, trees = seq(from = 1, by = nclass, length.out = nrounds)
)
xgb.importance(
model = mbst, trees = seq(from = 2, by = nclass, length.out = nrounds)
)
# multiclass classification using "gblinear":
mbst <- xgboost(
data = scale(as.matrix(iris[, -5])),
label = as.numeric(iris$Species) - 1,
booster = "gblinear",
eta = 0.2,
nthread = 1,
nrounds = 15,
objective = "multi:softprob",
num_class = nclass
)
xgb.importance(model = mbst) xgb.importance(model = mbst)
} }

View File

@ -0,0 +1,59 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{xgb.is.same.Booster}
\alias{xgb.is.same.Booster}
\title{Check if two boosters share the same C object}
\usage{
xgb.is.same.Booster(obj1, obj2)
}
\arguments{
\item{obj1}{Booster model to compare with \code{obj2}.}
\item{obj2}{Booster model to compare with \code{obj1}.}
}
\value{
Either \code{TRUE} or \code{FALSE} according to whether the two boosters share
the underlying C object.
}
\description{
Checks whether two booster objects refer to the same underlying C object.
}
\details{
As booster objects (as returned by e.g. \link{xgb.train}) contain an R 'externalptr'
object, they don't follow typical copy-on-write semantics of other R objects - that is, if
one assigns a booster to a different variable and modifies that new variable through in-place
methods like \link{xgb.attr<-}, the modification will be applied to both the old and the new
variable, unlike typical R assignments which would only modify the latter.
This function allows checking whether two booster objects share the same 'externalptr',
regardless of the R attributes that they might have.
In order to duplicate a booster in such a way that the copy wouldn't share the same
'externalptr', one can use function \link{xgb.copy.Booster}.
}
\examples{
library(xgboost)
data(mtcars)
y <- mtcars$mpg
x <- as.matrix(mtcars[, -1])
model <- xgb.train(
params = list(nthread = 1),
data = xgb.DMatrix(x, label = y, nthread = 1),
nround = 3
)
model_shallow_copy <- model
xgb.is.same.Booster(model, model_shallow_copy) # same C object
model_deep_copy <- xgb.copy.Booster(model)
xgb.is.same.Booster(model, model_deep_copy) # different C objects
# In-place assignments modify all references,
# but not full/deep copies of the booster
xgb.attr(model_shallow_copy, "my_attr") <- 111
xgb.attr(model, "my_attr") # gets modified
xgb.attr(model_deep_copy, "my_attr") # doesn't get modified
}
\seealso{
\link{xgb.copy.Booster}
}

View File

@ -34,17 +34,19 @@ data.table::setDTthreads(nthread)
train <- agaricus.train train <- agaricus.train
test <- agaricus.test test <- agaricus.test
bst <- xgboost( bst <- xgb.train(
data = train$data, label = train$label, max_depth = 2, eta = 1, data = xgb.DMatrix(train$data, label = train$label),
max_depth = 2,
eta = 1,
nthread = nthread, nthread = nthread,
nrounds = 2, nrounds = 2,
objective = "binary:logistic" objective = "binary:logistic"
) )
xgb.save(bst, 'xgb.model') fname <- file.path(tempdir(), "xgb.ubj")
bst <- xgb.load('xgb.model') xgb.save(bst, fname)
if (file.exists('xgb.model')) file.remove('xgb.model') bst <- xgb.load(fname)
} }
\seealso{ \seealso{
\code{\link{xgb.save}}, \code{\link{xgb.Booster.complete}}. \code{\link{xgb.save}}
} }

View File

@ -4,12 +4,10 @@
\alias{xgb.load.raw} \alias{xgb.load.raw}
\title{Load serialised xgboost model from R's raw vector} \title{Load serialised xgboost model from R's raw vector}
\usage{ \usage{
xgb.load.raw(buffer, as_booster = FALSE) xgb.load.raw(buffer)
} }
\arguments{ \arguments{
\item{buffer}{the buffer returned by xgb.save.raw} \item{buffer}{the buffer returned by xgb.save.raw}
\item{as_booster}{Return the loaded model as xgb.Booster instead of xgb.Booster.handle.}
} }
\description{ \description{
User can generate raw memory buffer by calling xgb.save.raw User can generate raw memory buffer by calling xgb.save.raw

View File

@ -2,10 +2,9 @@
% Please edit documentation in R/xgb.model.dt.tree.R % Please edit documentation in R/xgb.model.dt.tree.R
\name{xgb.model.dt.tree} \name{xgb.model.dt.tree}
\alias{xgb.model.dt.tree} \alias{xgb.model.dt.tree}
\title{Parse a boosted tree model text dump} \title{Parse model text dump}
\usage{ \usage{
xgb.model.dt.tree( xgb.model.dt.tree(
feature_names = NULL,
model = NULL, model = NULL,
text = NULL, text = NULL,
trees = NULL, trees = NULL,
@ -14,49 +13,43 @@ xgb.model.dt.tree(
) )
} }
\arguments{ \arguments{
\item{feature_names}{character vector of feature names. If the model already \item{model}{Object of class \code{xgb.Booster}. If it contains feature names (they can be set through
contains feature names, those would be used when \code{feature_names=NULL} (default value). \link{setinfo}), they will be used in the output from this function.}
Non-null \code{feature_names} could be provided to override those in the model.}
\item{model}{object of class \code{xgb.Booster}} \item{text}{Character vector previously generated by the function \code{\link[=xgb.dump]{xgb.dump()}}
(called with parameter \code{with_stats = TRUE}). \code{text} takes precedence over \code{model}.}
\item{text}{\code{character} vector previously generated by the \code{xgb.dump} \item{trees}{An integer vector of tree indices that should be used.
function (where parameter \code{with_stats = TRUE} should have been set). The default (\code{NULL}) uses all trees.
\code{text} takes precedence over \code{model}.} Useful, e.g., in multiclass classification to get only
the trees of one class. \emph{Important}: the tree index in XGBoost models
is zero-based (e.g., use \code{trees = 0:4} for the first five trees).}
\item{trees}{an integer vector of tree indices that should be parsed. \item{use_int_id}{A logical flag indicating whether nodes in columns "Yes", "No", and
If set to \code{NULL}, all trees of the model are parsed. "Missing" should be represented as integers (when \code{TRUE}) or as "Tree-Node"
It could be useful, e.g., in multiclass classification to get only character strings (when \code{FALSE}, default).}
the trees of one certain class. IMPORTANT: the tree index in xgboost models
is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).}
\item{use_int_id}{a logical flag indicating whether nodes in columns "Yes", "No", "Missing" should be \item{...}{Currently not used.}
represented as integers (when FALSE) or as "Tree-Node" character strings (when FALSE).}
\item{...}{currently not used.}
} }
\value{ \value{
A \code{data.table} with detailed information about model trees' nodes. A \code{data.table} with detailed information about tree nodes. It has the following columns:
The columns of the \code{data.table} are:
\itemize{ \itemize{
\item \code{Tree}: integer ID of a tree in a model (zero-based index) \item \code{Tree}: integer ID of a tree in a model (zero-based index).
\item \code{Node}: integer ID of a node in a tree (zero-based index) \item \code{Node}: integer ID of a node in a tree (zero-based index).
\item \code{ID}: character identifier of a node in a model (only when \code{use_int_id=FALSE}) \item \code{ID}: character identifier of a node in a model (only when \code{use_int_id = FALSE}).
\item \code{Feature}: for a branch node, it's a feature id or name (when available); \item \code{Feature}: for a branch node, a feature ID or name (when available);
for a leaf note, it simply labels it as \code{'Leaf'} for a leaf node, it simply labels it as \code{"Leaf"}.
\item \code{Split}: location of the split for a branch node (split condition is always "less than") \item \code{Split}: location of the split for a branch node (split condition is always "less than").
\item \code{Yes}: ID of the next node when the split condition is met \item \code{Yes}: ID of the next node when the split condition is met.
\item \code{No}: ID of the next node when the split condition is not met \item \code{No}: ID of the next node when the split condition is not met.
\item \code{Missing}: ID of the next node when branch value is missing \item \code{Missing}: ID of the next node when the branch value is missing.
\item \code{Quality}: either the split gain (change in loss) or the leaf value \item \code{Gain}: either the split gain (change in loss) or the leaf value.
\item \code{Cover}: metric related to the number of observation either seen by a split \item \code{Cover}: metric related to the number of observations either seen by a split
or collected by a leaf during training. or collected by a leaf during training.
} }
When \code{use_int_id=FALSE}, columns "Yes", "No", and "Missing" point to model-wide node identifiers When \code{use_int_id = FALSE}, columns "Yes", "No", and "Missing" point to model-wide node identifiers
in the "ID" column. When \code{use_int_id=TRUE}, those columns point to node identifiers from in the "ID" column. When \code{use_int_id = TRUE}, those columns point to node identifiers from
the corresponding trees in the "Node" column. the corresponding trees in the "Node" column.
} }
\description{ \description{
@ -65,22 +58,31 @@ Parse a boosted tree model text dump into a \code{data.table} structure.
\examples{ \examples{
# Basic use: # Basic use:
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
## Keep the number of threads to 1 for examples ## Keep the number of threads to 1 for examples
nthread <- 1 nthread <- 1
data.table::setDTthreads(nthread) data.table::setDTthreads(nthread)
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, bst <- xgboost(
eta = 1, nthread = nthread, nrounds = 2,objective = "binary:logistic") data = agaricus.train$data,
label = agaricus.train$label,
(dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst)) max_depth = 2,
eta = 1,
nthread = nthread,
nrounds = 2,
objective = "binary:logistic"
)
# This bst model already has feature_names stored with it, so those would be used when # This bst model already has feature_names stored with it, so those would be used when
# feature_names is not set: # feature_names is not set:
(dt <- xgb.model.dt.tree(model = bst)) dt <- xgb.model.dt.tree(bst)
# How to match feature names of splits that are following a current 'Yes' branch: # How to match feature names of splits that are following a current 'Yes' branch:
merge(
merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)] dt,
dt[, .(ID, Y.Feature = Feature)], by.x = "Yes", by.y = "ID", all.x = TRUE
)[
order(Tree, Node)
]
} }

View File

@ -2,29 +2,46 @@
% Please edit documentation in R/xgb.Booster.R % Please edit documentation in R/xgb.Booster.R
\name{xgb.parameters<-} \name{xgb.parameters<-}
\alias{xgb.parameters<-} \alias{xgb.parameters<-}
\title{Accessors for model parameters.} \title{Accessors for model parameters}
\usage{ \usage{
xgb.parameters(object) <- value xgb.parameters(object) <- value
} }
\arguments{ \arguments{
\item{object}{Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}.} \item{object}{Object of class \code{xgb.Booster}. \bold{Will be modified in-place}.}
\item{value}{a list (or an object coercible to a list) with the names of parameters to set \item{value}{A list (or an object coercible to a list) with the names of parameters to set
and the elements corresponding to parameter values.} and the elements corresponding to parameter values.}
} }
\value{
The same booster \code{object}, which gets modified in-place.
}
\description{ \description{
Only the setter for xgboost parameters is currently implemented. Only the setter for xgboost parameters is currently implemented.
} }
\details{ \details{
Note that the setter would usually work more efficiently for \code{xgb.Booster.handle} Just like \link{xgb.attr}, this function will make in-place modifications
than for \code{xgb.Booster}, since only just a handle would need to be copied. on the booster object which do not follow typical R assignment semantics - that is,
all references to the same booster will also be updated, unlike assingment of R
attributes which follow copy-on-write semantics.
See \link{xgb.copy.Booster} for an example of this behavior.
Be aware that setting parameters of a fitted booster related to training continuation / updates
will reset its number of rounds indicator to zero.
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
train <- agaricus.train train <- agaricus.train
bst <- xgboost(data = train$data, label = train$label, max_depth = 2, bst <- xgboost(
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") data = train$data,
label = train$label,
max_depth = 2,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
xgb.parameters(bst) <- list(eta = 0.1) xgb.parameters(bst) <- list(eta = 0.1)

View File

@ -3,7 +3,7 @@
\name{xgb.ggplot.deepness} \name{xgb.ggplot.deepness}
\alias{xgb.ggplot.deepness} \alias{xgb.ggplot.deepness}
\alias{xgb.plot.deepness} \alias{xgb.plot.deepness}
\title{Plot model trees deepness} \title{Plot model tree depth}
\usage{ \usage{
xgb.ggplot.deepness( xgb.ggplot.deepness(
model = NULL, model = NULL,
@ -18,66 +18,84 @@ xgb.plot.deepness(
) )
} }
\arguments{ \arguments{
\item{model}{either an \code{xgb.Booster} model generated by the \code{xgb.train} function \item{model}{Either an \code{xgb.Booster} model, or the "data.table" returned by \code{\link[=xgb.model.dt.tree]{xgb.model.dt.tree()}}.}
or a data.table result of the \code{xgb.model.dt.tree} function.}
\item{which}{which distribution to plot (see details).} \item{which}{Which distribution to plot (see details).}
\item{plot}{(base R barplot) whether a barplot should be produced. \item{plot}{Should the plot be shown? Default is \code{TRUE}.}
If FALSE, only a data.table is returned.}
\item{...}{other parameters passed to \code{barplot} or \code{plot}.} \item{...}{Other parameters passed to \code{\link[graphics:barplot]{graphics::barplot()}} or \code{\link[graphics:plot.default]{graphics::plot()}}.}
} }
\value{ \value{
Other than producing plots (when \code{plot=TRUE}), the \code{xgb.plot.deepness} function The return value of the two functions is as follows:
silently returns a processed data.table where each row corresponds to a terminal leaf in a tree model, \itemize{
and contains information about leaf's depth, cover, and weight (which is used in calculating predictions). \item \code{xgb.plot.deepness()}: A "data.table" (invisibly).
Each row corresponds to a terminal leaf in the model. It contains its information
The \code{xgb.ggplot.deepness} silently returns either a list of two ggplot graphs when \code{which="2x1"} about depth, cover, and weight (used in calculating predictions).
or a single ggplot graph for the other \code{which} options. If \code{plot = TRUE}, also a plot is shown.
\item \code{xgb.ggplot.deepness()}: When \code{which = "2x1"}, a list of two "ggplot" objects,
and a single "ggplot" object otherwise.
}
} }
\description{ \description{
Visualizes distributions related to depth of tree leafs. Visualizes distributions related to the depth of tree leaves.
\code{xgb.plot.deepness} uses base R graphics, while \code{xgb.ggplot.deepness} uses the ggplot backend. \itemize{
\item \code{xgb.plot.deepness()} uses base R graphics, while
\item \code{xgb.ggplot.deepness()} uses "ggplot2".
}
} }
\details{ \details{
When \code{which="2x1"}, two distributions with respect to the leaf depth When \code{which = "2x1"}, two distributions with respect to the leaf depth
are plotted on top of each other: are plotted on top of each other:
\itemize{ \enumerate{
\item the distribution of the number of leafs in a tree model at a certain depth; \item The distribution of the number of leaves in a tree model at a certain depth.
\item the distribution of average weighted number of observations ("cover") \item The distribution of the average weighted number of observations ("cover")
ending up in leafs at certain depth. ending up in leaves at a certain depth.
} }
Those could be helpful in determining sensible ranges of the \code{max_depth} Those could be helpful in determining sensible ranges of the \code{max_depth}
and \code{min_child_weight} parameters. and \code{min_child_weight} parameters.
When \code{which="max.depth"} or \code{which="med.depth"}, plots of either maximum or median depth When \code{which = "max.depth"} or \code{which = "med.depth"}, plots of either maximum or
per tree with respect to tree number are created. And \code{which="med.weight"} allows to see how median depth per tree with respect to the tree number are created.
Finally, \code{which = "med.weight"} allows to see how
a tree's median absolute leaf weight changes through the iterations. a tree's median absolute leaf weight changes through the iterations.
This function was inspired by the blog post These functions have been inspired by the blog post
\url{https://github.com/aysent/random-forest-leaf-visualization}. \url{https://github.com/aysent/random-forest-leaf-visualization}.
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
## Keep the number of threads to 2 for examples ## Keep the number of threads to 2 for examples
nthread <- 2 nthread <- 2
data.table::setDTthreads(nthread) data.table::setDTthreads(nthread)
## Change max_depth to a higher number to get a more significant result ## Change max_depth to a higher number to get a more significant result
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 6, bst <- xgboost(
eta = 0.1, nthread = nthread, nrounds = 50, objective = "binary:logistic", data = agaricus.train$data,
subsample = 0.5, min_child_weight = 2) label = agaricus.train$label,
max_depth = 6,
nthread = nthread,
nrounds = 50,
objective = "binary:logistic",
subsample = 0.5,
min_child_weight = 2
)
xgb.plot.deepness(bst) xgb.plot.deepness(bst)
xgb.ggplot.deepness(bst) xgb.ggplot.deepness(bst)
xgb.plot.deepness(bst, which='max.depth', pch=16, col=rgb(0,0,1,0.3), cex=2) xgb.plot.deepness(
bst, which = "max.depth", pch = 16, col = rgb(0, 0, 1, 0.3), cex = 2
)
xgb.plot.deepness(bst, which='med.weight', pch=16, col=rgb(0,0,1,0.3), cex=2) xgb.plot.deepness(
bst, which = "med.weight", pch = 16, col = rgb(0, 0, 1, 0.3), cex = 2
)
} }
\seealso{ \seealso{
\code{\link{xgb.train}}, \code{\link{xgb.model.dt.tree}}. \code{\link[=xgb.train]{xgb.train()}} and \code{\link[=xgb.model.dt.tree]{xgb.model.dt.tree()}}.
} }

View File

@ -3,7 +3,7 @@
\name{xgb.ggplot.importance} \name{xgb.ggplot.importance}
\alias{xgb.ggplot.importance} \alias{xgb.ggplot.importance}
\alias{xgb.plot.importance} \alias{xgb.plot.importance}
\title{Plot feature importance as a bar graph} \title{Plot feature importance}
\usage{ \usage{
xgb.ggplot.importance( xgb.ggplot.importance(
importance_matrix = NULL, importance_matrix = NULL,
@ -26,74 +26,90 @@ xgb.plot.importance(
) )
} }
\arguments{ \arguments{
\item{importance_matrix}{a \code{data.table} returned by \code{\link{xgb.importance}}.} \item{importance_matrix}{A \code{data.table} as returned by \code{\link[=xgb.importance]{xgb.importance()}}.}
\item{top_n}{maximal number of top features to include into the plot.} \item{top_n}{Maximal number of top features to include into the plot.}
\item{measure}{the name of importance measure to plot. \item{measure}{The name of importance measure to plot.
When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.} When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.}
\item{rel_to_first}{whether importance values should be represented as relative to the highest ranked feature. \item{rel_to_first}{Whether importance values should be represented as relative to
See Details.} the highest ranked feature, see Details.}
\item{n_clusters}{(ggplot only) a \code{numeric} vector containing the min and the max range \item{n_clusters}{A numeric vector containing the min and the max range
of the possible number of clusters of bars.} of the possible number of clusters of bars.}
\item{...}{other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).} \item{...}{Other parameters passed to \code{\link[graphics:barplot]{graphics::barplot()}}
(except \code{horiz}, \code{border}, \code{cex.names}, \code{names.arg}, and \code{las}).
Only used in \code{xgb.plot.importance()}.}
\item{left_margin}{(base R barplot) allows to adjust the left margin size to fit feature names. \item{left_margin}{Adjust the left margin size to fit feature names.
When it is NULL, the existing \code{par('mar')} is used.} When \code{NULL}, the existing \code{par("mar")} is used.}
\item{cex}{(base R barplot) passed as \code{cex.names} parameter to \code{barplot}.} \item{cex}{Passed as \code{cex.names} parameter to \code{\link[graphics:barplot]{graphics::barplot()}}.}
\item{plot}{(base R barplot) whether a barplot should be produced. \item{plot}{Should the barplot be shown? Default is \code{TRUE}.}
If FALSE, only a data.table is returned.}
} }
\value{ \value{
The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE}) The return value depends on the function:
and silently returns a processed data.table with \code{n_top} features sorted by importance. \itemize{
\item \code{xgb.plot.importance()}: Invisibly, a "data.table" with \code{n_top} features sorted
The \code{xgb.ggplot.importance} function returns a ggplot graph which could be customized afterwards. by importance. If \code{plot = TRUE}, the values are also plotted as barplot.
E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result. \item \code{xgb.ggplot.importance()}: A customizable "ggplot" object.
E.g., to change the title, set \code{+ ggtitle("A GRAPH NAME")}.
}
} }
\description{ \description{
Represents previously calculated feature importance as a bar graph. Represents previously calculated feature importance as a bar graph.
\code{xgb.plot.importance} uses base R graphics, while \code{xgb.ggplot.importance} uses the ggplot backend. \itemize{
\item \code{xgb.plot.importance()} uses base R graphics, while
\item \code{xgb.ggplot.importance()} uses "ggplot".
}
} }
\details{ \details{
The graph represents each feature as a horizontal bar of length proportional to the importance of a feature. The graph represents each feature as a horizontal bar of length proportional to the importance of a feature.
Features are shown ranked in a decreasing importance order. Features are sorted by decreasing importance.
It works for importances from both \code{gblinear} and \code{gbtree} models. It works for both "gblinear" and "gbtree" models.
When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}. When \code{rel_to_first = FALSE}, the values would be plotted as in \code{importance_matrix}.
For gbtree model, that would mean being normalized to the total of 1 For a "gbtree" model, that would mean being normalized to the total of 1
("what is feature's importance contribution relative to the whole model?"). ("what is feature's importance contribution relative to the whole model?").
For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients. For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients.
Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
"what is feature's importance contribution relative to the most important feature?" "what is feature's importance contribution relative to the most important feature?"
The ggplot-backend method also performs 1-D clustering of the importance values, The "ggplot" backend performs 1-D clustering of the importance values,
with bar colors corresponding to different clusters that have somewhat similar importance values. with bar colors corresponding to different clusters having similar importance values.
} }
\examples{ \examples{
data(agaricus.train) data(agaricus.train)
## Keep the number of threads to 2 for examples ## Keep the number of threads to 2 for examples
nthread <- 2 nthread <- 2
data.table::setDTthreads(nthread) data.table::setDTthreads(nthread)
bst <- xgboost( bst <- xgboost(
data = agaricus.train$data, label = agaricus.train$label, max_depth = 3, data = agaricus.train$data,
eta = 1, nthread = nthread, nrounds = 2, objective = "binary:logistic" label = agaricus.train$label,
max_depth = 3,
eta = 1,
nthread = nthread,
nrounds = 2,
objective = "binary:logistic"
) )
importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst) importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst)
xgb.plot.importance(
importance_matrix, rel_to_first = TRUE, xlab = "Relative importance"
)
xgb.plot.importance(importance_matrix, rel_to_first = TRUE, xlab = "Relative importance") gg <- xgb.ggplot.importance(
importance_matrix, measure = "Frequency", rel_to_first = TRUE
(gg <- xgb.ggplot.importance(importance_matrix, measure = "Frequency", rel_to_first = TRUE)) )
gg
gg + ggplot2::ylab("Frequency") gg + ggplot2::ylab("Frequency")
} }
\seealso{ \seealso{
\code{\link[graphics]{barplot}}. \code{\link[graphics:barplot]{graphics::barplot()}}
} }

View File

@ -2,11 +2,10 @@
% Please edit documentation in R/xgb.plot.multi.trees.R % Please edit documentation in R/xgb.plot.multi.trees.R
\name{xgb.plot.multi.trees} \name{xgb.plot.multi.trees}
\alias{xgb.plot.multi.trees} \alias{xgb.plot.multi.trees}
\title{Project all trees on one tree and plot it} \title{Project all trees on one tree}
\usage{ \usage{
xgb.plot.multi.trees( xgb.plot.multi.trees(
model, model,
feature_names = NULL,
features_keep = 5, features_keep = 5,
plot_width = NULL, plot_width = NULL,
plot_height = NULL, plot_height = NULL,
@ -15,29 +14,29 @@ xgb.plot.multi.trees(
) )
} }
\arguments{ \arguments{
\item{model}{produced by the \code{xgb.train} function.} \item{model}{Object of class \code{xgb.Booster}. If it contains feature names (they can be set through
\link{setinfo}), they will be used in the output from this function.}
\item{feature_names}{names of each feature as a \code{character} vector.} \item{features_keep}{Number of features to keep in each position of the multi trees,
by default 5.}
\item{features_keep}{number of features to keep in each position of the multi trees.} \item{plot_width, plot_height}{Width and height of the graph in pixels.
The values are passed to \code{\link[DiagrammeR:render_graph]{DiagrammeR::render_graph()}}.}
\item{plot_width}{width in pixels of the graph to produce} \item{render}{Should the graph be rendered or not? The default is \code{TRUE}.}
\item{plot_height}{height in pixels of the graph to produce} \item{...}{currently not used.}
\item{render}{a logical flag for whether the graph should be rendered (see Value).}
\item{...}{currently not used}
} }
\value{ \value{
When \code{render = TRUE}: The value depends on the \code{render} parameter:
returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}. \itemize{
Similar to ggplot objects, it needs to be printed to see it when not running from command line. \item If \code{render = TRUE} (default): Rendered graph object which is an htmlwidget of
class \code{grViz}. Similar to "ggplot" objects, it needs to be printed when not
When \code{render = FALSE}: running from the command line.
silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}. \item If \code{render = FALSE}: Graph object which is of DiagrammeR's class \code{dgr_graph}.
This could be useful if one wants to modify some of the graph attributes This could be useful if one wants to modify some of the graph attributes
before rendering the graph with \code{\link[DiagrammeR]{render_graph}}. before rendering the graph with \code{\link[DiagrammeR:render_graph]{DiagrammeR::render_graph()}}.
}
} }
\description{ \description{
Visualization of the ensemble of trees as a single collective unit. Visualization of the ensemble of trees as a single collective unit.
@ -62,15 +61,22 @@ This function is inspired by this blog post:
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
## Keep the number of threads to 2 for examples ## Keep the number of threads to 2 for examples
nthread <- 2 nthread <- 2
data.table::setDTthreads(nthread) data.table::setDTthreads(nthread)
bst <- xgboost( bst <- xgboost(
data = agaricus.train$data, label = agaricus.train$label, max_depth = 15, data = agaricus.train$data,
eta = 1, nthread = nthread, nrounds = 30, objective = "binary:logistic", label = agaricus.train$label,
min_child_weight = 50, verbose = 0 max_depth = 15,
eta = 1,
nthread = nthread,
nrounds = 30,
objective = "binary:logistic",
min_child_weight = 50,
verbose = 0
) )
p <- xgb.plot.multi.trees(model = bst, features_keep = 3) p <- xgb.plot.multi.trees(model = bst, features_keep = 3)
@ -78,10 +84,13 @@ print(p)
\dontrun{ \dontrun{
# Below is an example of how to save this plot to a file. # Below is an example of how to save this plot to a file.
# Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed. # Note that for export_graph() to work, the {DiagrammeRsvg} and {rsvg} packages
# must also be installed.
library(DiagrammeR) library(DiagrammeR)
gr <- xgb.plot.multi.trees(model=bst, features_keep = 3, render=FALSE)
export_graph(gr, 'tree.pdf', width=1500, height=600) gr <- xgb.plot.multi.trees(model = bst, features_keep = 3, render = FALSE)
export_graph(gr, "tree.pdf", width = 1500, height = 600)
} }
} }

View File

@ -2,7 +2,7 @@
% Please edit documentation in R/xgb.plot.shap.R % Please edit documentation in R/xgb.plot.shap.R
\name{xgb.plot.shap} \name{xgb.plot.shap}
\alias{xgb.plot.shap} \alias{xgb.plot.shap}
\title{SHAP contribution dependency plots} \title{SHAP dependence plots}
\usage{ \usage{
xgb.plot.shap( xgb.plot.shap(
data, data,
@ -33,87 +33,93 @@ xgb.plot.shap(
) )
} }
\arguments{ \arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.} \item{data}{The data to explain as a \code{matrix} or \code{dgCMatrix}.}
\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above \item{shap_contrib}{Matrix of SHAP contributions of \code{data}.
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.} The default (\code{NULL}) computes it from \code{model} and \code{data}.}
\item{features}{a vector of either column indices or of feature names to plot. When it is NULL, \item{features}{Vector of column indices or feature names to plot.
feature importance is calculated, and \code{top_n} high ranked features are taken.} When \code{NULL} (default), the \code{top_n} most important features are selected
by \code{\link[=xgb.importance]{xgb.importance()}}.}
\item{top_n}{when \code{features} is NULL, top_n [1, 100] most important features in a model are taken.} \item{top_n}{How many of the most important features (<= 100) should be selected?
By default 1 for SHAP dependence and 10 for SHAP summary).
Only used when \code{features = NULL}.}
\item{model}{an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib} \item{model}{An \code{xgb.Booster} model. Only required when \code{shap_contrib = NULL} or
or \code{features} is missing.} \code{features = NULL}.}
\item{trees}{passed to \code{\link{xgb.importance}} when \code{features = NULL}.} \item{trees}{Passed to \code{\link[=xgb.importance]{xgb.importance()}} when \code{features = NULL}.}
\item{target_class}{is only relevant for multiclass models. When it is set to a 0-based class index, \item{target_class}{Only relevant for multiclass models. The default (\code{NULL})
only SHAP contributions for that specific class are used. averages the SHAP values over all classes. Pass a (0-based) class index
If it is not set, SHAP importances are averaged over all classes.} to show only SHAP values of that class.}
\item{approxcontrib}{passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.} \item{approxcontrib}{Passed to \code{predict()} when \code{shap_contrib = NULL}.}
\item{subsample}{a random fraction of data points to use for plotting. When it is NULL, \item{subsample}{Fraction of data points randomly picked for plotting.
it is set so that up to 100K data points are used.} The default (\code{NULL}) will use up to 100k data points.}
\item{n_col}{a number of columns in a grid of plots.} \item{n_col}{Number of columns in a grid of plots.}
\item{col}{color of the scatterplot markers.} \item{col}{Color of the scatterplot markers.}
\item{pch}{scatterplot marker.} \item{pch}{Scatterplot marker.}
\item{discrete_n_uniq}{a maximal number of unique values in a feature to consider it as discrete.} \item{discrete_n_uniq}{Maximal number of unique feature values to consider the
feature as discrete.}
\item{discrete_jitter}{an \code{amount} parameter of jitter added to discrete features' positions.} \item{discrete_jitter}{Jitter amount added to the values of discrete features.}
\item{ylab}{a y-axis label in 1D plots.} \item{ylab}{The y-axis label in 1D plots.}
\item{plot_NA}{whether the contributions of cases with missing values should also be plotted.} \item{plot_NA}{Should contributions of cases with missing values be plotted?
Default is \code{TRUE}.}
\item{col_NA}{a color of marker for missing value contributions.} \item{col_NA}{Color of marker for missing value contributions.}
\item{pch_NA}{a marker type for NA values.} \item{pch_NA}{Marker type for \code{NA} values.}
\item{pos_NA}{a relative position of the x-location where NA values are shown: \item{pos_NA}{Relative position of the x-location where \code{NA} values are shown:
\code{min(x) + (max(x) - min(x)) * pos_NA}.} \code{min(x) + (max(x) - min(x)) * pos_NA}.}
\item{plot_loess}{whether to plot loess-smoothed curves. The smoothing is only done for features with \item{plot_loess}{Should loess-smoothed curves be plotted? (Default is \code{TRUE}).
more than 5 distinct values.} The smoothing is only done for features with more than 5 distinct values.}
\item{col_loess}{a color to use for the loess curves.} \item{col_loess}{Color of loess curves.}
\item{span_loess}{the \code{span} parameter in \code{\link[stats]{loess}}'s call.} \item{span_loess}{The \code{span} parameter of \code{\link[stats:loess]{stats::loess()}}.}
\item{which}{whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.} \item{which}{Whether to do univariate or bivariate plotting. Currently, only "1d" is implemented.}
\item{plot}{whether a plot should be drawn. If FALSE, only a list of matrices is returned.} \item{plot}{Should the plot be drawn? (Default is \code{TRUE}).
If \code{FALSE}, only a list of matrices is returned.}
\item{...}{other parameters passed to \code{plot}.} \item{...}{Other parameters passed to \code{\link[graphics:plot.default]{graphics::plot()}}.}
} }
\value{ \value{
In addition to producing plots (when \code{plot=TRUE}), it silently returns a list of two matrices: In addition to producing plots (when \code{plot = TRUE}), it silently returns a list of two matrices:
\itemize{ \itemize{
\item \code{data} the values of selected features; \item \code{data}: Feature value matrix.
\item \code{shap_contrib} the contributions of selected features. \item \code{shap_contrib}: Corresponding SHAP value matrix.
} }
} }
\description{ \description{
Visualizing the SHAP feature contribution to prediction dependencies on feature value. Visualizes SHAP values against feature values to gain an impression of feature effects.
} }
\details{ \details{
These scatterplots represent how SHAP feature contributions depend of feature values. These scatterplots represent how SHAP feature contributions depend of feature values.
The similarity to partial dependency plots is that they also give an idea for how feature values The similarity to partial dependence plots is that they also give an idea for how feature values
affect predictions. However, in partial dependency plots, we usually see marginal dependencies affect predictions. However, in partial dependence plots, we see marginal dependencies
of model prediction on feature value, while SHAP contribution dependency plots display the estimated of model prediction on feature value, while SHAP dependence plots display the estimated
contributions of a feature to model prediction for each individual case. contributions of a feature to the prediction for each individual case.
When \code{plot_loess = TRUE} is set, feature values are rounded to 3 significant digits and When \code{plot_loess = TRUE}, feature values are rounded to three significant digits and
weighted LOESS is computed and plotted, where weights are the numbers of data points weighted LOESS is computed and plotted, where the weights are the numbers of data points
at each rounded value. at each rounded value.
Note: SHAP contributions are shown on the scale of model margin. E.g., for a logistic binomial objective, Note: SHAP contributions are on the scale of the model margin.
the margin is prediction before a sigmoidal transform into probability-like values. E.g., for a logistic binomial objective, the margin is on log-odds scale.
Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP
contributions for all features + bias), depending on the objective used, transforming SHAP contributions for all features + bias), depending on the objective used, transforming SHAP
contributions for a feature from the marginal to the prediction space is not necessarily contributions for a feature from the marginal to the prediction space is not necessarily
@ -121,44 +127,99 @@ a meaningful thing to do.
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
data(agaricus.test, package='xgboost') data(agaricus.test, package = "xgboost")
## Keep the number of threads to 1 for examples ## Keep the number of threads to 1 for examples
nthread <- 1 nthread <- 1
data.table::setDTthreads(nthread) data.table::setDTthreads(nthread)
nrounds <- 20 nrounds <- 20
bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = nrounds, bst <- xgboost(
eta = 0.1, max_depth = 3, subsample = .5, agaricus.train$data,
method = "hist", objective = "binary:logistic", nthread = nthread, verbose = 0) agaricus.train$label,
nrounds = nrounds,
eta = 0.1,
max_depth = 3,
subsample = 0.5,
objective = "binary:logistic",
nthread = nthread,
verbose = 0
)
xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none") xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none")
contr <- predict(bst, agaricus.test$data, predcontrib = TRUE) contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3) xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3)
xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12) # Summary plot
# multiclass example - plots for each class separately: # Summary plot
xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12)
# Multiclass example - plots for each class separately:
nclass <- 3 nclass <- 3
x <- as.matrix(iris[, -5]) x <- as.matrix(iris[, -5])
set.seed(123) set.seed(123)
is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values
mbst <- xgboost(data = x, label = as.numeric(iris$Species) - 1, nrounds = nrounds,
max_depth = 2, eta = 0.3, subsample = .5, nthread = nthread, mbst <- xgboost(
objective = "multi:softprob", num_class = nclass, verbose = 0) data = x,
trees0 <- seq(from=0, by=nclass, length.out=nrounds) label = as.numeric(iris$Species) - 1,
nrounds = nrounds,
max_depth = 2,
eta = 0.3,
subsample = 0.5,
nthread = nthread,
objective = "multi:softprob",
num_class = nclass,
verbose = 0
)
trees0 <- seq(from = 0, by = nclass, length.out = nrounds)
col <- rgb(0, 0, 1, 0.5) col <- rgb(0, 0, 1, 0.5)
xgb.plot.shap(x, model = mbst, trees = trees0, target_class = 0, top_n = 4, xgb.plot.shap(
n_col = 2, col = col, pch = 16, pch_NA = 17) x,
xgb.plot.shap(x, model = mbst, trees = trees0 + 1, target_class = 1, top_n = 4, model = mbst,
n_col = 2, col = col, pch = 16, pch_NA = 17) trees = trees0,
xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4, target_class = 0,
n_col = 2, col = col, pch = 16, pch_NA = 17) top_n = 4,
xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4) # Summary plot n_col = 2,
col = col,
pch = 16,
pch_NA = 17
)
xgb.plot.shap(
x,
model = mbst,
trees = trees0 + 1,
target_class = 1,
top_n = 4,
n_col = 2,
col = col,
pch = 16,
pch_NA = 17
)
xgb.plot.shap(
x,
model = mbst,
trees = trees0 + 2,
target_class = 2,
top_n = 4,
n_col = 2,
col = col,
pch = 16,
pch_NA = 17
)
# Summary plot
xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4)
} }
\references{ \references{
Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874} \enumerate{
\item Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions",
Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060} NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}
\item Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles",
\url{https://arxiv.org/abs/1706.06060}
}
} }

View File

@ -3,7 +3,7 @@
\name{xgb.ggplot.shap.summary} \name{xgb.ggplot.shap.summary}
\alias{xgb.ggplot.shap.summary} \alias{xgb.ggplot.shap.summary}
\alias{xgb.plot.shap.summary} \alias{xgb.plot.shap.summary}
\title{SHAP contribution dependency summary plot} \title{SHAP summary plot}
\usage{ \usage{
xgb.ggplot.shap.summary( xgb.ggplot.shap.summary(
data, data,
@ -30,49 +30,54 @@ xgb.plot.shap.summary(
) )
} }
\arguments{ \arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.} \item{data}{The data to explain as a \code{matrix} or \code{dgCMatrix}.}
\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above \item{shap_contrib}{Matrix of SHAP contributions of \code{data}.
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.} The default (\code{NULL}) computes it from \code{model} and \code{data}.}
\item{features}{a vector of either column indices or of feature names to plot. When it is NULL, \item{features}{Vector of column indices or feature names to plot.
feature importance is calculated, and \code{top_n} high ranked features are taken.} When \code{NULL} (default), the \code{top_n} most important features are selected
by \code{\link[=xgb.importance]{xgb.importance()}}.}
\item{top_n}{when \code{features} is NULL, top_n [1, 100] most important features in a model are taken.} \item{top_n}{How many of the most important features (<= 100) should be selected?
By default 1 for SHAP dependence and 10 for SHAP summary).
Only used when \code{features = NULL}.}
\item{model}{an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib} \item{model}{An \code{xgb.Booster} model. Only required when \code{shap_contrib = NULL} or
or \code{features} is missing.} \code{features = NULL}.}
\item{trees}{passed to \code{\link{xgb.importance}} when \code{features = NULL}.} \item{trees}{Passed to \code{\link[=xgb.importance]{xgb.importance()}} when \code{features = NULL}.}
\item{target_class}{is only relevant for multiclass models. When it is set to a 0-based class index, \item{target_class}{Only relevant for multiclass models. The default (\code{NULL})
only SHAP contributions for that specific class are used. averages the SHAP values over all classes. Pass a (0-based) class index
If it is not set, SHAP importances are averaged over all classes.} to show only SHAP values of that class.}
\item{approxcontrib}{passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.} \item{approxcontrib}{Passed to \code{predict()} when \code{shap_contrib = NULL}.}
\item{subsample}{a random fraction of data points to use for plotting. When it is NULL, \item{subsample}{Fraction of data points randomly picked for plotting.
it is set so that up to 100K data points are used.} The default (\code{NULL}) will use up to 100k data points.}
} }
\value{ \value{
A \code{ggplot2} object. A \code{ggplot2} object.
} }
\description{ \description{
Compare SHAP contributions of different features. Visualizes SHAP contributions of different features.
} }
\details{ \details{
A point plot (each point representing one sample from \code{data}) is A point plot (each point representing one observation from \code{data}) is
produced for each feature, with the points plotted on the SHAP value axis. produced for each feature, with the points plotted on the SHAP value axis.
Each point (observation) is coloured based on its feature value. The plot Each point (observation) is coloured based on its feature value.
hence allows us to see which features have a negative / positive contribution
The plot allows to see which features have a negative / positive contribution
on the model prediction, and whether the contribution is different for larger on the model prediction, and whether the contribution is different for larger
or smaller values of the feature. We effectively try to replicate the or smaller values of the feature. Inspired by the summary plot of
\code{summary_plot} function from https://github.com/shap/shap. \url{https://github.com/shap/shap}.
} }
\examples{ \examples{
# See \code{\link{xgb.plot.shap}}. # See examples in xgb.plot.shap()
} }
\seealso{ \seealso{
\code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}}, \code{\link[=xgb.plot.shap]{xgb.plot.shap()}}, \code{\link[=xgb.ggplot.shap.summary]{xgb.ggplot.shap.summary()}},
\url{https://github.com/shap/shap} and the Python library \url{https://github.com/shap/shap}.
} }

View File

@ -2,90 +2,131 @@
% Please edit documentation in R/xgb.plot.tree.R % Please edit documentation in R/xgb.plot.tree.R
\name{xgb.plot.tree} \name{xgb.plot.tree}
\alias{xgb.plot.tree} \alias{xgb.plot.tree}
\title{Plot a boosted tree model} \title{Plot boosted trees}
\usage{ \usage{
xgb.plot.tree( xgb.plot.tree(
feature_names = NULL,
model = NULL, model = NULL,
trees = NULL, trees = NULL,
plot_width = NULL, plot_width = NULL,
plot_height = NULL, plot_height = NULL,
render = TRUE, render = TRUE,
show_node_id = FALSE, show_node_id = FALSE,
style = c("R", "xgboost"),
... ...
) )
} }
\arguments{ \arguments{
\item{feature_names}{names of each feature as a \code{character} vector.} \item{model}{Object of class \code{xgb.Booster}. If it contains feature names (they can be set through
\link{setinfo}), they will be used in the output from this function.}
\item{model}{produced by the \code{xgb.train} function.} \item{trees}{An integer vector of tree indices that should be used.
The default (\code{NULL}) uses all trees.
Useful, e.g., in multiclass classification to get only
the trees of one class. \emph{Important}: the tree index in XGBoost models
is zero-based (e.g., use \code{trees = 0:2} for the first three trees).}
\item{trees}{an integer vector of tree indices that should be visualized. \item{plot_width, plot_height}{Width and height of the graph in pixels.
If set to \code{NULL}, all trees of the model are included. The values are passed to \code{\link[DiagrammeR:render_graph]{DiagrammeR::render_graph()}}.}
IMPORTANT: the tree index in xgboost model is zero-based
(e.g., use \code{trees = 0:2} for the first 3 trees in a model).}
\item{plot_width}{the width of the diagram in pixels.} \item{render}{Should the graph be rendered or not? The default is \code{TRUE}.}
\item{plot_height}{the height of the diagram in pixels.}
\item{render}{a logical flag for whether the graph should be rendered (see Value).}
\item{show_node_id}{a logical flag for whether to show node id's in the graph.} \item{show_node_id}{a logical flag for whether to show node id's in the graph.}
\item{style}{Style to use for the plot. Options are:\itemize{
\item \code{"xgboost"}: will use the plot style defined in the core XGBoost library,
which is shared between different interfaces through the 'dot' format. This
style was not available before version 2.1.0 in R. It always plots the trees
vertically (from top to bottom).
\item \code{"R"}: will use the style defined from XGBoost's R interface, which predates
the introducition of the standardized style from the core library. It might plot
the trees horizontally (from left to right).
}
Note that \code{style="xgboost"} is only supported when all of the following conditions are met:\itemize{
\item Only a single tree is being plotted.
\item Node IDs are not added to the graph.
\item The graph is being returned as \code{htmlwidget} (\code{render=TRUE}).
}}
\item{...}{currently not used.} \item{...}{currently not used.}
} }
\value{ \value{
When \code{render = TRUE}: The value depends on the \code{render} parameter:
returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}. \itemize{
Similar to ggplot objects, it needs to be printed to see it when not running from command line. \item If \code{render = TRUE} (default): Rendered graph object which is an htmlwidget of
class \code{grViz}. Similar to "ggplot" objects, it needs to be printed when not
When \code{render = FALSE}: running from the command line.
silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}. \item If \code{render = FALSE}: Graph object which is of DiagrammeR's class \code{dgr_graph}.
This could be useful if one wants to modify some of the graph attributes This could be useful if one wants to modify some of the graph attributes
before rendering the graph with \code{\link[DiagrammeR]{render_graph}}. before rendering the graph with \code{\link[DiagrammeR:render_graph]{DiagrammeR::render_graph()}}.
}
} }
\description{ \description{
Read a tree model text dump and plot the model. Read a tree model text dump and plot the model.
} }
\details{ \details{
The content of each node is organised that way: When using \code{style="xgboost"}, the content of each node is visualized as follows:
\itemize{ \itemize{
\item Feature name. \item For non-terminal nodes, it will display the split condition (number or name if
\item \code{Cover}: The sum of second order gradient of training data classified to the leaf. available, and the condition that would decide to which node to go next).
If it is square loss, this simply corresponds to the number of instances seen by a split \item Those nodes will be connected to their children by arrows that indicate whether the
or collected by a leaf during training. branch corresponds to the condition being met or not being met.
The deeper in the tree a node is, the lower this metric will be. \item Terminal (leaf) nodes contain the margin to add when ending there.
\item \code{Gain} (for split nodes): the information gain metric of a split
(corresponds to the importance of the node in the model).
\item \code{Value} (for leafs): the margin value that the leaf may contribute to prediction.
} }
The tree root nodes also indicate the Tree index (0-based).
When using \code{style="R"}, the content of each node is visualized like this:
\itemize{
\item \emph{Feature name}.
\item \emph{Cover:} The sum of second order gradients of training data.
For the squared loss, this simply corresponds to the number of instances in the node.
The deeper in the tree, the lower the value.
\item \emph{Gain} (for split nodes): Information gain metric of a split
(corresponds to the importance of the node in the model).
\item \emph{Value} (for leaves): Margin value that the leaf may contribute to the prediction.
}
The tree root nodes also indicate the tree index (0-based).
The "Yes" branches are marked by the "< split_value" label. The "Yes" branches are marked by the "< split_value" label.
The branches that also used for missing values are marked as bold The branches also used for missing values are marked as bold
(as in "carrying extra capacity"). (as in "carrying extra capacity").
This function uses \href{https://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR. This function uses \href{https://www.graphviz.org/}{GraphViz} as DiagrammeR backend.
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
bst <- xgboost(
data = agaricus.train$data,
label = agaricus.train$label,
max_depth = 3,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
# plot the first tree, using the style from xgboost's core library
# (this plot should look identical to the ones generated from other
# interfaces like the python package for xgboost)
xgb.plot.tree(model = bst, trees = 1, style = "xgboost")
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
# plot all the trees # plot all the trees
xgb.plot.tree(model = bst) xgb.plot.tree(model = bst, trees = NULL)
# plot only the first tree and display the node ID: # plot only the first tree and display the node ID:
xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE) xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE)
\dontrun{ \dontrun{
# Below is an example of how to save this plot to a file. # Below is an example of how to save this plot to a file.
# Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed. # Note that for export_graph() to work, the {DiagrammeRsvg}
# and {rsvg} packages must also be installed.
library(DiagrammeR) library(DiagrammeR)
gr <- xgb.plot.tree(model=bst, trees=0:1, render=FALSE)
export_graph(gr, 'tree.pdf', width=1500, height=1900) gr <- xgb.plot.tree(model = bst, trees = 0:1, render = FALSE)
export_graph(gr, 'tree.png', width=1500, height=1900) export_graph(gr, "tree.pdf", width = 1500, height = 1900)
export_graph(gr, "tree.png", width = 1500, height = 1900)
} }
} }

View File

@ -7,15 +7,27 @@
xgb.save(model, fname) xgb.save(model, fname)
} }
\arguments{ \arguments{
\item{model}{model object of \code{xgb.Booster} class.} \item{model}{Model object of \code{xgb.Booster} class.}
\item{fname}{name of the file to write.} \item{fname}{Name of the file to write.
Note that the extension of this file name determined the serialization format to use:\itemize{
\item Extension ".ubj" will use the universal binary JSON format (recommended).
This format uses binary types for e.g. floating point numbers, thereby preventing any loss
of precision when converting to a human-readable JSON text or similar.
\item Extension ".json" will use plain JSON, which is a human-readable format.
\item Extension ".deprecated" will use a \bold{deprecated} binary format. This format will
not be able to save attributes introduced after v1 of XGBoost, such as the "best_iteration"
attribute that boosters might keep, nor feature names or user-specified attributes.
\item If the format is not specified by passing one of the file extensions above, will
default to UBJ.
}}
} }
\description{ \description{
Save xgboost model to a file in binary format. Save xgboost model to a file in binary or JSON format.
} }
\details{ \details{
This methods allows to save a model in an xgboost-internal binary format which is universal This methods allows to save a model in an xgboost-internal binary or text format which is universal
among the various xgboost interfaces. In R, the saved model file could be read-in later among the various xgboost interfaces. In R, the saved model file could be read-in later
using either the \code{\link{xgb.load}} function or the \code{xgb_model} parameter using either the \code{\link{xgb.load}} function or the \code{xgb_model} parameter
of \code{\link{xgb.train}}. of \code{\link{xgb.train}}.
@ -23,7 +35,7 @@ of \code{\link{xgb.train}}.
Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{readRDS}} Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{readRDS}}
or \code{\link[base]{save}}). However, it would then only be compatible with R, and or \code{\link[base]{save}}). However, it would then only be compatible with R, and
corresponding R-methods would need to be used to load it. Moreover, persisting the model with corresponding R-methods would need to be used to load it. Moreover, persisting the model with
\code{\link[base]{readRDS}} or \code{\link[base]{save}}) will cause compatibility problems in \code{\link[base]{readRDS}} or \code{\link[base]{save}}) might cause compatibility problems in
future versions of XGBoost. Consult \code{\link{a-compatibility-note-for-saveRDS-save}} to learn future versions of XGBoost. Consult \code{\link{a-compatibility-note-for-saveRDS-save}} to learn
how to persist models in a future-proof way, i.e. to make the model accessible in future how to persist models in a future-proof way, i.e. to make the model accessible in future
releases of XGBoost. releases of XGBoost.
@ -38,16 +50,18 @@ data.table::setDTthreads(nthread)
train <- agaricus.train train <- agaricus.train
test <- agaricus.test test <- agaricus.test
bst <- xgboost( bst <- xgb.train(
data = train$data, label = train$label, max_depth = 2, eta = 1, data = xgb.DMatrix(train$data, label = train$label),
max_depth = 2,
eta = 1,
nthread = nthread, nthread = nthread,
nrounds = 2, nrounds = 2,
objective = "binary:logistic" objective = "binary:logistic"
) )
xgb.save(bst, 'xgb.model') fname <- file.path(tempdir(), "xgb.ubj")
bst <- xgb.load('xgb.model') xgb.save(bst, fname)
if (file.exists('xgb.model')) file.remove('xgb.model') bst <- xgb.load(fname)
} }
\seealso{ \seealso{
\code{\link{xgb.load}}, \code{\link{xgb.Booster.complete}}. \code{\link{xgb.load}}
} }

View File

@ -5,19 +5,17 @@
\title{Save xgboost model to R's raw vector, \title{Save xgboost model to R's raw vector,
user can call xgb.load.raw to load the model back from raw vector} user can call xgb.load.raw to load the model back from raw vector}
\usage{ \usage{
xgb.save.raw(model, raw_format = "deprecated") xgb.save.raw(model, raw_format = "ubj")
} }
\arguments{ \arguments{
\item{model}{the model object.} \item{model}{the model object.}
\item{raw_format}{The format for encoding the booster. Available options are \item{raw_format}{The format for encoding the booster. Available options are
\itemize{ \itemize{
\item \code{json}: Encode the booster into JSON text document. \item \code{json}: Encode the booster into JSON text document.
\item \code{ubj}: Encode the booster into Universal Binary JSON. \item \code{ubj}: Encode the booster into Universal Binary JSON.
\item \code{deprecated}: Encode the booster into old customized binary format. \item \code{deprecated}: Encode the booster into old customized binary format.
} }}
Right now the default is \code{deprecated} but will be changed to \code{ubj} in upcoming release.}
} }
\description{ \description{
Save xgboost model from xgboost or xgb.train Save xgboost model from xgboost or xgb.train
@ -32,8 +30,8 @@ data.table::setDTthreads(nthread)
train <- agaricus.train train <- agaricus.train
test <- agaricus.test test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2, bst <- xgb.train(data = xgb.DMatrix(train$data, label = train$label), max_depth = 2,
eta = 1, nthread = nthread, nrounds = 2,objective = "binary:logistic") eta = 1, nthread = nthread, nrounds = 2,objective = "binary:logistic")
raw <- xgb.save.raw(bst) raw <- xgb.save.raw(bst)
bst <- xgb.load.raw(raw) bst <- xgb.load.raw(raw)

View File

@ -1,29 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.serialize.R
\name{xgb.serialize}
\alias{xgb.serialize}
\title{Serialize the booster instance into R's raw vector. The serialization method differs
from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
parameters. This serialization format is not stable across different xgboost versions.}
\usage{
xgb.serialize(booster)
}
\arguments{
\item{booster}{the booster instance}
}
\description{
Serialize the booster instance into R's raw vector. The serialization method differs
from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
parameters. This serialization format is not stable across different xgboost versions.
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
raw <- xgb.serialize(bst)
bst <- xgb.unserialize(raw)
}

View File

@ -1,55 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.plot.shap.R
\name{xgb.shap.data}
\alias{xgb.shap.data}
\title{Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc.
Internal utility function.}
\usage{
xgb.shap.data(
data,
shap_contrib = NULL,
features = NULL,
top_n = 1,
model = NULL,
trees = NULL,
target_class = NULL,
approxcontrib = FALSE,
subsample = NULL,
max_observations = 1e+05
)
}
\arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.}
\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.}
\item{features}{a vector of either column indices or of feature names to plot. When it is NULL,
feature importance is calculated, and \code{top_n} high ranked features are taken.}
\item{top_n}{when \code{features} is NULL, top_n [1, 100] most important features in a model are taken.}
\item{model}{an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib}
or \code{features} is missing.}
\item{trees}{passed to \code{\link{xgb.importance}} when \code{features = NULL}.}
\item{target_class}{is only relevant for multiclass models. When it is set to a 0-based class index,
only SHAP contributions for that specific class are used.
If it is not set, SHAP importances are averaged over all classes.}
\item{approxcontrib}{passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.}
\item{subsample}{a random fraction of data points to use for plotting. When it is NULL,
it is set so that up to 100K data points are used.}
}
\value{
A list containing: 'data', a matrix containing sample observations
and their feature values; 'shap_contrib', a matrix containing the SHAP contribution
values for these observations.
}
\description{
Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc.
Internal utility function.
}
\keyword{internal}

View File

@ -0,0 +1,57 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{xgb.slice.Booster}
\alias{xgb.slice.Booster}
\alias{[.xgb.Booster}
\title{Slice Booster by Rounds}
\usage{
xgb.slice.Booster(
model,
start,
end = xgb.get.num.boosted.rounds(model),
step = 1L
)
\method{[}{xgb.Booster}(x, i)
}
\arguments{
\item{model, x}{A fitted \code{xgb.Booster} object, which is to be sliced by taking only a subset
of its rounds / iterations.}
\item{start}{Start of the slice (base-1 and inclusive, like R's \link{seq}).}
\item{end}{End of the slice (base-1 and inclusive, like R's \link{seq}).
Passing a value of zero here is equivalent to passing the full number of rounds in the
booster object.}
\item{step}{Step size of the slice. Passing '1' will take every round in the sequence defined by
\verb{(start, end)}, while passing '2' will take every second value, and so on.}
\item{i}{The indices - must be an increasing sequence as generated by e.g. \code{seq(...)}.}
}
\value{
A sliced booster object containing only the requested rounds.
}
\description{
Creates a new booster including only a selected range of rounds / iterations
from an existing booster, as given by the sequence \code{seq(start, end, step)}.
}
\details{
Note that any R attributes that the booster might have, will not be copied into
the resulting object.
}
\examples{
data(mtcars)
y <- mtcars$mpg
x <- as.matrix(mtcars[, -1])
dm <- xgb.DMatrix(x, label = y, nthread = 1)
model <- xgb.train(data = dm, params = list(nthread = 1), nrounds = 5)
model_slice <- xgb.slice.Booster(model, 1, 3)
# Prediction for first three rounds
predict(model, x, predleaf = TRUE)[, 1:3]
# The new model has only those rounds, so
# a full prediction from it is equivalent
predict(model_slice, x, predleaf = TRUE)
}

View File

@ -43,111 +43,114 @@ xgboost(
} }
\arguments{ \arguments{
\item{params}{the list of parameters. The complete list of parameters is \item{params}{the list of parameters. The complete list of parameters is
available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
is a shorter summary: is a shorter summary:
\enumerate{
1. General Parameters \item General Parameters
\itemize{
\item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
} }
2. Booster Parameters \itemize{
\item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
}
\enumerate{
\item Booster Parameters
}
2.1. Parameters for Tree Booster 2.1. Parameters for Tree Booster
\itemize{ \itemize{
\item{ \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} \item{ \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1}
when it is added to the current approximation. when it is added to the current approximation.
Used to prevent overfitting by making the boosting process more conservative. Used to prevent overfitting by making the boosting process more conservative.
Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model
more robust to overfitting but slower to compute. Default: 0.3} more robust to overfitting but slower to compute. Default: 0.3}
\item{ \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. \item{ \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree.
the larger, the more conservative the algorithm will be.} the larger, the more conservative the algorithm will be.}
\item \code{max_depth} maximum depth of a tree. Default: 6 \item \code{max_depth} maximum depth of a tree. Default: 6
\item{\code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. \item{\code{min_child_weight} minimum sum of instance weight (hessian) needed in a child.
If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight,
then the building process will give up further partitioning. then the building process will give up further partitioning.
In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node.
The larger, the more conservative the algorithm will be. Default: 1} The larger, the more conservative the algorithm will be. Default: 1}
\item{ \code{subsample} subsample ratio of the training instance. \item{ \code{subsample} subsample ratio of the training instance.
Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees
and this will prevent overfitting. It makes computation shorter (because less data to analyse). and this will prevent overfitting. It makes computation shorter (because less data to analyse).
It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1} It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1}
\item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1 \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
\item \code{lambda} L2 regularization term on weights. Default: 1 \item \code{lambda} L2 regularization term on weights. Default: 1
\item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0 \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
\item{ \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. \item{ \code{num_parallel_tree} Experimental parameter. number of trees to grow per round.
Useful to test Random Forest through XGBoost Useful to test Random Forest through XGBoost
(set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly.
Default: 1} Default: 1}
\item{ \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length \item{ \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length
equals to the number of features in the training data. equals to the number of features in the training data.
\code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.} \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.}
\item{ \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. \item{ \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions.
Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other.
Feature index values should start from \code{0} (\code{0} references the first column). Feature index values should start from \code{0} (\code{0} references the first column).
Leave argument unspecified for no interaction constraints.} Leave argument unspecified for no interaction constraints.}
} }
2.2. Parameters for Linear Booster 2.2. Parameters for Linear Booster
\itemize{ \itemize{
\item \code{lambda} L2 regularization term on weights. Default: 0 \item \code{lambda} L2 regularization term on weights. Default: 0
\item \code{lambda_bias} L2 regularization term on bias. Default: 0 \item \code{lambda_bias} L2 regularization term on bias. Default: 0
\item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0 \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
}
\enumerate{
\item Task Parameters
} }
3. Task Parameters
\itemize{ \itemize{
\item{ \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. \item{ \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it.
The default objective options are below: The default objective options are below:
\itemize{ \itemize{
\item \code{reg:squarederror} Regression with squared loss (Default). \item \code{reg:squarederror} Regression with squared loss (Default).
\item{ \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. \item{ \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}.
All inputs are required to be greater than -1. All inputs are required to be greater than -1.
Also, see metric rmsle for possible issue with this objective.} Also, see metric rmsle for possible issue with this objective.}
\item \code{reg:logistic} logistic regression. \item \code{reg:logistic} logistic regression.
\item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss. \item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
\item \code{binary:logistic} logistic regression for binary classification. Output probability. \item \code{binary:logistic} logistic regression for binary classification. Output probability.
\item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation. \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
\item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities. \item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
\item{ \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution. \item{ \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution.
\code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).} \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).}
\item{ \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). \item{ \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored).
Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional
hazard function \code{h(t) = h0(t) * HR)}.} hazard function \code{h(t) = h0(t) * HR)}.}
\item{ \code{survival:aft}: Accelerated failure time model for censored survival time data. See \item{ \code{survival:aft}: Accelerated failure time model for censored survival time data. See
\href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time}
for details.} for details.}
\item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and \code{aft-nloglik} metric. \item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and \code{aft-nloglik} metric.
\item{ \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. \item{ \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective.
Class is represented by a number and should be from 0 to \code{num_class - 1}.} Class is represented by a number and should be from 0 to \code{num_class - 1}.}
\item{ \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be \item{ \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be
further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging
to each class.} to each class.}
\item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss. \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
\item{ \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \item{ \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where
\href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.} \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.}
\item{ \code{rank:map}: Use LambdaMART to perform list-wise ranking where \item{ \code{rank:map}: Use LambdaMART to perform list-wise ranking where
\href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)}
is maximized.} is maximized.}
\item{ \code{reg:gamma}: gamma regression with log-link. \item{ \code{reg:gamma}: gamma regression with log-link.
Output is a mean of gamma distribution. Output is a mean of gamma distribution.
It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be
\href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.} \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.}
\item{ \code{reg:tweedie}: Tweedie regression with log-link. \item{ \code{reg:tweedie}: Tweedie regression with log-link.
It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be
\href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.} \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.}
} }
} }
\item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5 \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
\item{ \code{eval_metric} evaluation metrics for validation data. \item{ \code{eval_metric} evaluation metrics for validation data.
Users can pass a self-defined function to it. Users can pass a self-defined function to it.
Default: metric will be assigned according to objective Default: metric will be assigned according to objective
(rmse for regression, and error for classification, mean average precision for ranking). (rmse for regression, and error for classification, mean average precision for ranking).
List is provided in detail section.} List is provided in detail section.}
}} }}
\item{data}{training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input. \item{data}{training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
@ -202,7 +205,12 @@ file with a previously saved model.}
\item{callbacks}{a list of callback functions to perform various task during boosting. \item{callbacks}{a list of callback functions to perform various task during boosting.
See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
parameters' values. User can provide either existing or their own callback methods in order parameters' values. User can provide either existing or their own callback methods in order
to customize the training process.} to customize the training process.
\if{html}{\out{<div class="sourceCode">}}\preformatted{ Note that some callbacks might try to set an evaluation log - be aware that these evaluation logs
are kept as R attributes, and thus do not get saved when using non-R serializers like
\link{xgb.save} (but are kept when using R serializers like \link{saveRDS}).
}\if{html}{\out{</div>}}}
\item{...}{other parameters to pass to \code{params}.} \item{...}{other parameters to pass to \code{params}.}
@ -216,27 +224,7 @@ This parameter is only used when input is a dense matrix.}
\item{weight}{a vector indicating the weight for each row of the input.} \item{weight}{a vector indicating the weight for each row of the input.}
} }
\value{ \value{
An object of class \code{xgb.Booster} with the following elements: An object of class \code{xgb.Booster}.
\itemize{
\item \code{handle} a handle (pointer) to the xgboost model in memory.
\item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
\item \code{niter} number of boosting iterations.
\item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
first column corresponding to iteration number and the rest corresponding to evaluation
metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
\item \code{call} a function call.
\item \code{params} parameters that were passed to the xgboost library. Note that it does not
capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
\item \code{callbacks} callback functions that were either automatically assigned or
explicitly passed.
\item \code{best_iteration} iteration number with the best evaluation metric value
(only available with early stopping).
\item \code{best_score} the best evaluation metric value during early stopping.
(only available with early stopping).
\item \code{feature_names} names of the training dataset features
(only when column names were defined in training data).
\item \code{nfeatures} number of features in training data.
}
} }
\description{ \description{
\code{xgb.train} is an advanced interface for training an xgboost model. \code{xgb.train} is an advanced interface for training an xgboost model.
@ -258,30 +246,45 @@ when the \code{eval_metric} parameter is not provided.
User may set one or several \code{eval_metric} parameters. User may set one or several \code{eval_metric} parameters.
Note that when using a customized metric, only this single metric can be used. Note that when using a customized metric, only this single metric can be used.
The following is the list of built-in metrics for which XGBoost provides optimized implementation: The following is the list of built-in metrics for which XGBoost provides optimized implementation:
\itemize{ \itemize{
\item \code{rmse} root mean square error. \url{https://en.wikipedia.org/wiki/Root_mean_square_error} \item \code{rmse} root mean square error. \url{https://en.wikipedia.org/wiki/Root_mean_square_error}
\item \code{logloss} negative log-likelihood. \url{https://en.wikipedia.org/wiki/Log-likelihood} \item \code{logloss} negative log-likelihood. \url{https://en.wikipedia.org/wiki/Log-likelihood}
\item \code{mlogloss} multiclass logloss. \url{https://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html} \item \code{mlogloss} multiclass logloss. \url{https://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html}
\item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}. \item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
By default, it uses the 0.5 threshold for predicted values to define negative and positive instances. By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
Different threshold (e.g., 0.) could be specified as "error@0." Different threshold (e.g., 0.) could be specified as "error@0."
\item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}. \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
\item \code{mae} Mean absolute error \item \code{mae} Mean absolute error
\item \code{mape} Mean absolute percentage error \item \code{mape} Mean absolute percentage error
\item{ \code{auc} Area under the curve. \item{ \code{auc} Area under the curve.
\url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.} \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.}
\item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation. \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
\item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG} \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG}
} }
The following callbacks are automatically created when certain parameters are set: The following callbacks are automatically created when certain parameters are set:
\itemize{ \itemize{
\item \code{cb.print.evaluation} is turned on when \code{verbose > 0}; \item \code{cb.print.evaluation} is turned on when \code{verbose > 0};
and the \code{print_every_n} parameter is passed to it. and the \code{print_every_n} parameter is passed to it.
\item \code{cb.evaluation.log} is on when \code{watchlist} is present. \item \code{cb.evaluation.log} is on when \code{watchlist} is present.
\item \code{cb.early.stop}: when \code{early_stopping_rounds} is set. \item \code{cb.early.stop}: when \code{early_stopping_rounds} is set.
\item \code{cb.save.model}: when \code{save_period > 0} is set. \item \code{cb.save.model}: when \code{save_period > 0} is set.
} }
Note that objects of type \code{xgb.Booster} as returned by this function behave a bit differently
from typical R objects (it's an 'altrep' list class), and it makes a separation between
internal booster attributes (restricted to jsonifyable data), accessed through \link{xgb.attr}
and shared between interfaces through serialization functions like \link{xgb.save}; and
R-specific attributes, accessed through \link{attributes} and \link{attr}, which are otherwise
only used in the R interface, only kept when using R's serializers like \link{saveRDS}, and
not anyhow used by functions like \link{predict.xgb.Booster}.
Be aware that one such R attribute that is automatically added is \code{params} - this attribute
is assigned from the \code{params} argument to this function, and is only meant to serve as a
reference for what went into the booster, but is not used in other methods that take a booster
object - so for example, changing the booster's configuration requires calling \verb{xgb.config<-}
or 'xgb.parameters<-', while simply modifying \verb{attributes(model)$params$<...>} will have no
effect elsewhere.
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package='xgboost')
@ -300,9 +303,9 @@ dtest <- with(
watchlist <- list(train = dtrain, eval = dtest) watchlist <- list(train = dtrain, eval = dtest)
## A simple xgb.train example: ## A simple xgb.train example:
param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = nthread, param <- list(max_depth = 2, eta = 1, nthread = nthread,
objective = "binary:logistic", eval_metric = "auc") objective = "binary:logistic", eval_metric = "auc")
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist) bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0)
## An xgb.train example where custom objective and evaluation metric are ## An xgb.train example where custom objective and evaluation metric are
## used: ## used:
@ -321,13 +324,13 @@ evalerror <- function(preds, dtrain) {
# These functions could be used by passing them either: # These functions could be used by passing them either:
# as 'objective' and 'eval_metric' parameters in the params list: # as 'objective' and 'eval_metric' parameters in the params list:
param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = nthread, param <- list(max_depth = 2, eta = 1, nthread = nthread,
objective = logregobj, eval_metric = evalerror) objective = logregobj, eval_metric = evalerror)
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist) bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0)
# or through the ... arguments: # or through the ... arguments:
param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = nthread) param <- list(max_depth = 2, eta = 1, nthread = nthread)
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
objective = logregobj, eval_metric = evalerror) objective = logregobj, eval_metric = evalerror)
# or as dedicated 'obj' and 'feval' parameters of xgb.train: # or as dedicated 'obj' and 'feval' parameters of xgb.train:
@ -336,10 +339,10 @@ bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
## An xgb.train example of using variable learning rates at each iteration: ## An xgb.train example of using variable learning rates at each iteration:
param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = nthread, param <- list(max_depth = 2, eta = 1, nthread = nthread,
objective = "binary:logistic", eval_metric = "auc") objective = "binary:logistic", eval_metric = "auc")
my_etas <- list(eta = c(0.5, 0.1)) my_etas <- list(eta = c(0.5, 0.1))
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
callbacks = list(cb.reset.parameters(my_etas))) callbacks = list(cb.reset.parameters(my_etas)))
## Early stopping: ## Early stopping:

View File

@ -1,21 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.unserialize.R
\name{xgb.unserialize}
\alias{xgb.unserialize}
\title{Load the instance back from \code{\link{xgb.serialize}}}
\usage{
xgb.unserialize(buffer, handle = NULL)
}
\arguments{
\item{buffer}{the buffer containing booster instance saved by \code{\link{xgb.serialize}}}
\item{handle}{An \code{xgb.Booster.handle} object which will be overwritten with
the new deserialized object. Must be a null handle (e.g. when loading the model through
`readRDS`). If not provided, a new handle will be created.}
}
\value{
An \code{xgb.Booster.handle} object.
}
\description{
Load the instance back from \code{\link{xgb.serialize}}
}

View File

@ -63,6 +63,7 @@ OBJECTS= \
$(PKGROOT)/src/gbm/gblinear.o \ $(PKGROOT)/src/gbm/gblinear.o \
$(PKGROOT)/src/gbm/gblinear_model.o \ $(PKGROOT)/src/gbm/gblinear_model.o \
$(PKGROOT)/src/data/adapter.o \ $(PKGROOT)/src/data/adapter.o \
$(PKGROOT)/src/data/array_interface.o \
$(PKGROOT)/src/data/simple_dmatrix.o \ $(PKGROOT)/src/data/simple_dmatrix.o \
$(PKGROOT)/src/data/data.o \ $(PKGROOT)/src/data/data.o \
$(PKGROOT)/src/data/sparse_page_raw_format.o \ $(PKGROOT)/src/data/sparse_page_raw_format.o \

View File

@ -63,6 +63,7 @@ OBJECTS= \
$(PKGROOT)/src/gbm/gblinear.o \ $(PKGROOT)/src/gbm/gblinear.o \
$(PKGROOT)/src/gbm/gblinear_model.o \ $(PKGROOT)/src/gbm/gblinear_model.o \
$(PKGROOT)/src/data/adapter.o \ $(PKGROOT)/src/data/adapter.o \
$(PKGROOT)/src/data/array_interface.o \
$(PKGROOT)/src/data/simple_dmatrix.o \ $(PKGROOT)/src/data/simple_dmatrix.o \
$(PKGROOT)/src/data/data.o \ $(PKGROOT)/src/data/data.o \
$(PKGROOT)/src/data/sparse_page_raw_format.o \ $(PKGROOT)/src/data/sparse_page_raw_format.o \

View File

@ -15,9 +15,16 @@ Check these declarations against the C/Fortran source code.
*/ */
/* .Call calls */ /* .Call calls */
extern void XGBInitializeAltrepClass_R(DllInfo *info);
extern SEXP XGDuplicate_R(SEXP);
extern SEXP XGPointerEqComparison_R(SEXP, SEXP);
extern SEXP XGBoosterTrainOneIter_R(SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP XGBoosterTrainOneIter_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterCreate_R(SEXP); extern SEXP XGBoosterCreate_R(SEXP);
extern SEXP XGBoosterCreateInEmptyObj_R(SEXP, SEXP); extern SEXP XGBoosterCopyInfoFromDMatrix_R(SEXP, SEXP);
extern SEXP XGBoosterSetStrFeatureInfo_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterGetStrFeatureInfo_R(SEXP, SEXP);
extern SEXP XGBoosterBoostedRounds_R(SEXP);
extern SEXP XGBoosterGetNumFeature_R(SEXP);
extern SEXP XGBoosterDumpModel_R(SEXP, SEXP, SEXP, SEXP); extern SEXP XGBoosterDumpModel_R(SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterEvalOneIter_R(SEXP, SEXP, SEXP, SEXP); extern SEXP XGBoosterEvalOneIter_R(SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterGetAttrNames_R(SEXP); extern SEXP XGBoosterGetAttrNames_R(SEXP);
@ -35,14 +42,21 @@ extern SEXP XGBoosterSetAttr_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterSetParam_R(SEXP, SEXP, SEXP); extern SEXP XGBoosterSetParam_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterUpdateOneIter_R(SEXP, SEXP, SEXP); extern SEXP XGBoosterUpdateOneIter_R(SEXP, SEXP, SEXP);
extern SEXP XGCheckNullPtr_R(SEXP); extern SEXP XGCheckNullPtr_R(SEXP);
extern SEXP XGSetArrayDimInplace_R(SEXP, SEXP);
extern SEXP XGSetArrayDimNamesInplace_R(SEXP, SEXP);
extern SEXP XGDMatrixCreateFromCSC_R(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP XGDMatrixCreateFromCSC_R(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGDMatrixCreateFromCSR_R(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP XGDMatrixCreateFromCSR_R(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGDMatrixCreateFromFile_R(SEXP, SEXP); extern SEXP XGDMatrixCreateFromFile_R(SEXP, SEXP);
extern SEXP XGDMatrixCreateFromMat_R(SEXP, SEXP, SEXP); extern SEXP XGDMatrixCreateFromMat_R(SEXP, SEXP, SEXP);
extern SEXP XGDMatrixGetInfo_R(SEXP, SEXP); extern SEXP XGDMatrixGetFloatInfo_R(SEXP, SEXP);
extern SEXP XGDMatrixGetUIntInfo_R(SEXP, SEXP);
extern SEXP XGDMatrixCreateFromDF_R(SEXP, SEXP, SEXP);
extern SEXP XGDMatrixGetStrFeatureInfo_R(SEXP, SEXP); extern SEXP XGDMatrixGetStrFeatureInfo_R(SEXP, SEXP);
extern SEXP XGDMatrixNumCol_R(SEXP); extern SEXP XGDMatrixNumCol_R(SEXP);
extern SEXP XGDMatrixNumRow_R(SEXP); extern SEXP XGDMatrixNumRow_R(SEXP);
extern SEXP XGDMatrixGetQuantileCut_R(SEXP);
extern SEXP XGDMatrixNumNonMissing_R(SEXP);
extern SEXP XGDMatrixGetDataAsCSR_R(SEXP);
extern SEXP XGDMatrixSaveBinary_R(SEXP, SEXP, SEXP); extern SEXP XGDMatrixSaveBinary_R(SEXP, SEXP, SEXP);
extern SEXP XGDMatrixSetInfo_R(SEXP, SEXP, SEXP); extern SEXP XGDMatrixSetInfo_R(SEXP, SEXP, SEXP);
extern SEXP XGDMatrixSetStrFeatureInfo_R(SEXP, SEXP, SEXP); extern SEXP XGDMatrixSetStrFeatureInfo_R(SEXP, SEXP, SEXP);
@ -50,11 +64,18 @@ extern SEXP XGDMatrixSliceDMatrix_R(SEXP, SEXP);
extern SEXP XGBSetGlobalConfig_R(SEXP); extern SEXP XGBSetGlobalConfig_R(SEXP);
extern SEXP XGBGetGlobalConfig_R(void); extern SEXP XGBGetGlobalConfig_R(void);
extern SEXP XGBoosterFeatureScore_R(SEXP, SEXP); extern SEXP XGBoosterFeatureScore_R(SEXP, SEXP);
extern SEXP XGBoosterSlice_R(SEXP, SEXP, SEXP, SEXP);
static const R_CallMethodDef CallEntries[] = { static const R_CallMethodDef CallEntries[] = {
{"XGBoosterBoostOneIter_R", (DL_FUNC) &XGBoosterTrainOneIter_R, 5}, {"XGDuplicate_R", (DL_FUNC) &XGDuplicate_R, 1},
{"XGPointerEqComparison_R", (DL_FUNC) &XGPointerEqComparison_R, 2},
{"XGBoosterTrainOneIter_R", (DL_FUNC) &XGBoosterTrainOneIter_R, 5},
{"XGBoosterCreate_R", (DL_FUNC) &XGBoosterCreate_R, 1}, {"XGBoosterCreate_R", (DL_FUNC) &XGBoosterCreate_R, 1},
{"XGBoosterCreateInEmptyObj_R", (DL_FUNC) &XGBoosterCreateInEmptyObj_R, 2}, {"XGBoosterCopyInfoFromDMatrix_R", (DL_FUNC) &XGBoosterCopyInfoFromDMatrix_R, 2},
{"XGBoosterSetStrFeatureInfo_R",(DL_FUNC) &XGBoosterSetStrFeatureInfo_R,3}, // NOLINT
{"XGBoosterGetStrFeatureInfo_R",(DL_FUNC) &XGBoosterGetStrFeatureInfo_R,2}, // NOLINT
{"XGBoosterBoostedRounds_R", (DL_FUNC) &XGBoosterBoostedRounds_R, 1},
{"XGBoosterGetNumFeature_R", (DL_FUNC) &XGBoosterGetNumFeature_R, 1},
{"XGBoosterDumpModel_R", (DL_FUNC) &XGBoosterDumpModel_R, 4}, {"XGBoosterDumpModel_R", (DL_FUNC) &XGBoosterDumpModel_R, 4},
{"XGBoosterEvalOneIter_R", (DL_FUNC) &XGBoosterEvalOneIter_R, 4}, {"XGBoosterEvalOneIter_R", (DL_FUNC) &XGBoosterEvalOneIter_R, 4},
{"XGBoosterGetAttrNames_R", (DL_FUNC) &XGBoosterGetAttrNames_R, 1}, {"XGBoosterGetAttrNames_R", (DL_FUNC) &XGBoosterGetAttrNames_R, 1},
@ -72,14 +93,21 @@ static const R_CallMethodDef CallEntries[] = {
{"XGBoosterSetParam_R", (DL_FUNC) &XGBoosterSetParam_R, 3}, {"XGBoosterSetParam_R", (DL_FUNC) &XGBoosterSetParam_R, 3},
{"XGBoosterUpdateOneIter_R", (DL_FUNC) &XGBoosterUpdateOneIter_R, 3}, {"XGBoosterUpdateOneIter_R", (DL_FUNC) &XGBoosterUpdateOneIter_R, 3},
{"XGCheckNullPtr_R", (DL_FUNC) &XGCheckNullPtr_R, 1}, {"XGCheckNullPtr_R", (DL_FUNC) &XGCheckNullPtr_R, 1},
{"XGSetArrayDimInplace_R", (DL_FUNC) &XGSetArrayDimInplace_R, 2},
{"XGSetArrayDimNamesInplace_R", (DL_FUNC) &XGSetArrayDimNamesInplace_R, 2},
{"XGDMatrixCreateFromCSC_R", (DL_FUNC) &XGDMatrixCreateFromCSC_R, 6}, {"XGDMatrixCreateFromCSC_R", (DL_FUNC) &XGDMatrixCreateFromCSC_R, 6},
{"XGDMatrixCreateFromCSR_R", (DL_FUNC) &XGDMatrixCreateFromCSR_R, 6}, {"XGDMatrixCreateFromCSR_R", (DL_FUNC) &XGDMatrixCreateFromCSR_R, 6},
{"XGDMatrixCreateFromFile_R", (DL_FUNC) &XGDMatrixCreateFromFile_R, 2}, {"XGDMatrixCreateFromFile_R", (DL_FUNC) &XGDMatrixCreateFromFile_R, 2},
{"XGDMatrixCreateFromMat_R", (DL_FUNC) &XGDMatrixCreateFromMat_R, 3}, {"XGDMatrixCreateFromMat_R", (DL_FUNC) &XGDMatrixCreateFromMat_R, 3},
{"XGDMatrixGetInfo_R", (DL_FUNC) &XGDMatrixGetInfo_R, 2}, {"XGDMatrixGetFloatInfo_R", (DL_FUNC) &XGDMatrixGetFloatInfo_R, 2},
{"XGDMatrixGetUIntInfo_R", (DL_FUNC) &XGDMatrixGetUIntInfo_R, 2},
{"XGDMatrixCreateFromDF_R", (DL_FUNC) &XGDMatrixCreateFromDF_R, 3},
{"XGDMatrixGetStrFeatureInfo_R", (DL_FUNC) &XGDMatrixGetStrFeatureInfo_R, 2}, {"XGDMatrixGetStrFeatureInfo_R", (DL_FUNC) &XGDMatrixGetStrFeatureInfo_R, 2},
{"XGDMatrixNumCol_R", (DL_FUNC) &XGDMatrixNumCol_R, 1}, {"XGDMatrixNumCol_R", (DL_FUNC) &XGDMatrixNumCol_R, 1},
{"XGDMatrixNumRow_R", (DL_FUNC) &XGDMatrixNumRow_R, 1}, {"XGDMatrixNumRow_R", (DL_FUNC) &XGDMatrixNumRow_R, 1},
{"XGDMatrixGetQuantileCut_R", (DL_FUNC) &XGDMatrixGetQuantileCut_R, 1},
{"XGDMatrixNumNonMissing_R", (DL_FUNC) &XGDMatrixNumNonMissing_R, 1},
{"XGDMatrixGetDataAsCSR_R", (DL_FUNC) &XGDMatrixGetDataAsCSR_R, 1},
{"XGDMatrixSaveBinary_R", (DL_FUNC) &XGDMatrixSaveBinary_R, 3}, {"XGDMatrixSaveBinary_R", (DL_FUNC) &XGDMatrixSaveBinary_R, 3},
{"XGDMatrixSetInfo_R", (DL_FUNC) &XGDMatrixSetInfo_R, 3}, {"XGDMatrixSetInfo_R", (DL_FUNC) &XGDMatrixSetInfo_R, 3},
{"XGDMatrixSetStrFeatureInfo_R", (DL_FUNC) &XGDMatrixSetStrFeatureInfo_R, 3}, {"XGDMatrixSetStrFeatureInfo_R", (DL_FUNC) &XGDMatrixSetStrFeatureInfo_R, 3},
@ -87,6 +115,7 @@ static const R_CallMethodDef CallEntries[] = {
{"XGBSetGlobalConfig_R", (DL_FUNC) &XGBSetGlobalConfig_R, 1}, {"XGBSetGlobalConfig_R", (DL_FUNC) &XGBSetGlobalConfig_R, 1},
{"XGBGetGlobalConfig_R", (DL_FUNC) &XGBGetGlobalConfig_R, 0}, {"XGBGetGlobalConfig_R", (DL_FUNC) &XGBGetGlobalConfig_R, 0},
{"XGBoosterFeatureScore_R", (DL_FUNC) &XGBoosterFeatureScore_R, 2}, {"XGBoosterFeatureScore_R", (DL_FUNC) &XGBoosterFeatureScore_R, 2},
{"XGBoosterSlice_R", (DL_FUNC) &XGBoosterSlice_R, 4},
{NULL, NULL, 0} {NULL, NULL, 0}
}; };
@ -96,4 +125,5 @@ __declspec(dllexport)
void attribute_visible R_init_xgboost(DllInfo *dll) { void attribute_visible R_init_xgboost(DllInfo *dll) {
R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
R_useDynamicSymbols(dll, FALSE); R_useDynamicSymbols(dll, FALSE);
XGBInitializeAltrepClass_R(dll);
} }

File diff suppressed because it is too large Load Diff

View File

@ -8,7 +8,9 @@
#define XGBOOST_R_H_ // NOLINT(*) #define XGBOOST_R_H_ // NOLINT(*)
#include <R.h>
#include <Rinternals.h> #include <Rinternals.h>
#include <R_ext/Altrep.h>
#include <R_ext/Random.h> #include <R_ext/Random.h>
#include <Rmath.h> #include <Rmath.h>
@ -21,6 +23,22 @@
*/ */
XGB_DLL SEXP XGCheckNullPtr_R(SEXP handle); XGB_DLL SEXP XGCheckNullPtr_R(SEXP handle);
/*!
* \brief set the dimensions of an array in-place
* \param arr
* \param dims dimensions to set to the array
* \return NULL value
*/
XGB_DLL SEXP XGSetArrayDimInplace_R(SEXP arr, SEXP dims);
/*!
* \brief set the names of the dimensions of an array in-place
* \param arr
* \param dim_names names for the dimensions to set
* \return NULL value
*/
XGB_DLL SEXP XGSetArrayDimNamesInplace_R(SEXP arr, SEXP dim_names);
/*! /*!
* \brief Set global configuration * \brief Set global configuration
* \param json_str a JSON string representing the list of key-value pairs * \param json_str a JSON string representing the list of key-value pairs
@ -53,6 +71,16 @@ XGB_DLL SEXP XGDMatrixCreateFromFile_R(SEXP fname, SEXP silent);
XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat, XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat,
SEXP missing, SEXP missing,
SEXP n_threads); SEXP n_threads);
/**
* @brief Create matrix content from a data frame.
* @param data R data.frame object
* @param missing which value to represent missing value
* @param n_threads Number of threads used to construct DMatrix from dense matrix.
* @return created dmatrix
*/
XGB_DLL SEXP XGDMatrixCreateFromDF_R(SEXP df, SEXP missing, SEXP n_threads);
/*! /*!
* \brief create a matrix content from CSC format * \brief create a matrix content from CSC format
* \param indptr pointer to column headers * \param indptr pointer to column headers
@ -106,12 +134,20 @@ XGB_DLL SEXP XGDMatrixSaveBinary_R(SEXP handle, SEXP fname, SEXP silent);
XGB_DLL SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array); XGB_DLL SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array);
/*! /*!
* \brief get info vector from matrix * \brief get info vector (float type) from matrix
* \param handle a instance of data matrix * \param handle a instance of data matrix
* \param field field name * \param field field name
* \return info vector * \return info vector
*/ */
XGB_DLL SEXP XGDMatrixGetInfo_R(SEXP handle, SEXP field); XGB_DLL SEXP XGDMatrixGetFloatInfo_R(SEXP handle, SEXP field);
/*!
* \brief get info vector (uint type) from matrix
* \param handle a instance of data matrix
* \param field field name
* \return info vector
*/
XGB_DLL SEXP XGDMatrixGetUIntInfo_R(SEXP handle, SEXP field);
/*! /*!
* \brief return number of rows * \brief return number of rows
@ -125,19 +161,87 @@ XGB_DLL SEXP XGDMatrixNumRow_R(SEXP handle);
*/ */
XGB_DLL SEXP XGDMatrixNumCol_R(SEXP handle); XGB_DLL SEXP XGDMatrixNumCol_R(SEXP handle);
/*!
* \brief Call R C-level function 'duplicate'
* \param obj Object to duplicate
*/
XGB_DLL SEXP XGDuplicate_R(SEXP obj);
/*!
* \brief Equality comparison for two pointers
* \param obj1 R 'externalptr'
* \param obj2 R 'externalptr'
*/
XGB_DLL SEXP XGPointerEqComparison_R(SEXP obj1, SEXP obj2);
/*!
* \brief Register the Altrep class used for the booster
* \param dll DLL info as provided by R_init
*/
XGB_DLL void XGBInitializeAltrepClass_R(DllInfo *dll);
/*!
* \brief return the quantile cuts used for the histogram method
* \param handle an instance of data matrix
* \return A list with entries 'indptr' and 'data'
*/
XGB_DLL SEXP XGDMatrixGetQuantileCut_R(SEXP handle);
/*!
* \brief get the number of non-missing entries in a dmatrix
* \param handle an instance of data matrix
* \return the number of non-missing entries
*/
XGB_DLL SEXP XGDMatrixNumNonMissing_R(SEXP handle);
/*!
* \brief get the data in a dmatrix in CSR format
* \param handle an instance of data matrix
* \return R list with the following entries in this order:
* - 'indptr
* - 'indices
* - 'data'
* - 'ncol'
*/
XGB_DLL SEXP XGDMatrixGetDataAsCSR_R(SEXP handle);
/*! /*!
* \brief create xgboost learner * \brief create xgboost learner
* \param dmats a list of dmatrix handles that will be cached * \param dmats a list of dmatrix handles that will be cached
*/ */
XGB_DLL SEXP XGBoosterCreate_R(SEXP dmats); XGB_DLL SEXP XGBoosterCreate_R(SEXP dmats);
/*!
* \brief copy information about features from a DMatrix into a Booster
* \param booster R 'externalptr' pointing to a booster object
* \param dmat R 'externalptr' pointing to a DMatrix object
*/
XGB_DLL SEXP XGBoosterCopyInfoFromDMatrix_R(SEXP booster, SEXP dmat);
/*! /*!
* \brief create xgboost learner, saving the pointer into an existing R object * \brief handle R 'externalptr' holding the booster object
* \param dmats a list of dmatrix handles that will be cached * \param field field name
* \param R_handle a clean R external pointer (not holding any object) * \param features features to set for the field
*/ */
XGB_DLL SEXP XGBoosterCreateInEmptyObj_R(SEXP dmats, SEXP R_handle); XGB_DLL SEXP XGBoosterSetStrFeatureInfo_R(SEXP handle, SEXP field, SEXP features);
/*!
* \brief handle R 'externalptr' holding the booster object
* \param field field name
*/
XGB_DLL SEXP XGBoosterGetStrFeatureInfo_R(SEXP handle, SEXP field);
/*!
* \brief Get the number of boosted rounds from a model
* \param handle R 'externalptr' holding the booster object
*/
XGB_DLL SEXP XGBoosterBoostedRounds_R(SEXP handle);
/*!
* \brief Get the number of features to which the model was fitted
* \param handle R 'externalptr' holding the booster object
*/
XGB_DLL SEXP XGBoosterGetNumFeature_R(SEXP handle);
/*! /*!
* \brief set parameters * \brief set parameters
@ -298,4 +402,14 @@ XGB_DLL SEXP XGBoosterGetAttrNames_R(SEXP handle);
*/ */
XGB_DLL SEXP XGBoosterFeatureScore_R(SEXP handle, SEXP json_config); XGB_DLL SEXP XGBoosterFeatureScore_R(SEXP handle, SEXP json_config);
/*!
* \brief Slice a fitted booster model (by rounds)
* \param handle handle to the fitted booster
* \param begin_layer start of the slice
* \param end_later end of the slice; end_layer=0 is equivalent to end_layer=num_boost_round
* \param step step size of the slice
* \return The sliced booster with the requested rounds only
*/
XGB_DLL SEXP XGBoosterSlice_R(SEXP handle, SEXP begin_layer, SEXP end_layer, SEXP step);
#endif // XGBOOST_WRAPPER_R_H_ // NOLINT(*) #endif // XGBOOST_WRAPPER_R_H_ // NOLINT(*)

View File

@ -17,7 +17,11 @@ namespace xgboost {
ConsoleLogger::~ConsoleLogger() { ConsoleLogger::~ConsoleLogger() {
if (cur_verbosity_ == LogVerbosity::kIgnore || if (cur_verbosity_ == LogVerbosity::kIgnore ||
cur_verbosity_ <= GlobalVerbosity()) { cur_verbosity_ <= GlobalVerbosity()) {
dmlc::CustomLogMessage::Log(log_stream_.str()); if (cur_verbosity_ == LogVerbosity::kWarning) {
REprintf("%s\n", log_stream_.str().c_str());
} else {
dmlc::CustomLogMessage::Log(log_stream_.str());
}
} }
} }
TrackerLogger::~TrackerLogger() { TrackerLogger::~TrackerLogger() {

View File

@ -3,7 +3,6 @@
## inconsistent is found. ## inconsistent is found.
pkgs <- c( pkgs <- c(
## CI ## CI
"caret",
"pkgbuild", "pkgbuild",
"roxygen2", "roxygen2",
"XML", "XML",

View File

@ -16,31 +16,28 @@ n_threads <- 1
test_that("train and predict binary classification", { test_that("train and predict binary classification", {
nrounds <- 2 nrounds <- 2
expect_output( expect_output(
bst <- xgboost( bst <- xgb.train(
data = train$data, label = train$label, max_depth = 2, data = xgb.DMatrix(train$data, label = train$label), max_depth = 2,
eta = 1, nthread = n_threads, nrounds = nrounds, eta = 1, nthread = n_threads, nrounds = nrounds,
objective = "binary:logistic", eval_metric = "error" objective = "binary:logistic", eval_metric = "error",
watchlist = list(train = xgb.DMatrix(train$data, label = train$label))
), ),
"train-error" "train-error"
) )
expect_equal(class(bst), "xgb.Booster") expect_equal(class(bst), "xgb.Booster")
expect_equal(bst$niter, nrounds) expect_equal(xgb.get.num.boosted.rounds(bst), nrounds)
expect_false(is.null(bst$evaluation_log)) expect_false(is.null(attributes(bst)$evaluation_log))
expect_equal(nrow(bst$evaluation_log), nrounds) expect_equal(nrow(attributes(bst)$evaluation_log), nrounds)
expect_lt(bst$evaluation_log[, min(train_error)], 0.03) expect_lt(attributes(bst)$evaluation_log[, min(train_error)], 0.03)
pred <- predict(bst, test$data) pred <- predict(bst, test$data)
expect_length(pred, 1611) expect_length(pred, 1611)
pred1 <- predict(bst, train$data, ntreelimit = 1) pred1 <- predict(bst, train$data, iterationrange = c(1, 1))
expect_length(pred1, 6513) expect_length(pred1, 6513)
err_pred1 <- sum((pred1 > 0.5) != train$label) / length(train$label) err_pred1 <- sum((pred1 > 0.5) != train$label) / length(train$label)
err_log <- bst$evaluation_log[1, train_error] err_log <- attributes(bst)$evaluation_log[1, train_error]
expect_lt(abs(err_pred1 - err_log), 10e-6) expect_lt(abs(err_pred1 - err_log), 10e-6)
pred2 <- predict(bst, train$data, iterationrange = c(1, 2))
expect_length(pred1, 6513)
expect_equal(pred1, pred2)
}) })
test_that("parameter validation works", { test_that("parameter validation works", {
@ -56,7 +53,7 @@ test_that("parameter validation works", {
y <- d[, "x1"] + d[, "x2"]^2 + y <- d[, "x1"] + d[, "x2"]^2 +
ifelse(d[, "x3"] > .5, d[, "x3"]^2, 2^d[, "x3"]) + ifelse(d[, "x3"] > .5, d[, "x3"]^2, 2^d[, "x3"]) +
rnorm(10) rnorm(10)
dtrain <- xgb.DMatrix(data = d, info = list(label = y), nthread = n_threads) dtrain <- xgb.DMatrix(data = d, label = y, nthread = n_threads)
correct <- function() { correct <- function() {
params <- list( params <- list(
@ -82,7 +79,8 @@ test_that("parameter validation works", {
bar = "foo" bar = "foo"
) )
output <- capture.output( output <- capture.output(
xgb.train(params = params, data = dtrain, nrounds = nrounds) xgb.train(params = params, data = dtrain, nrounds = nrounds),
type = "message"
) )
print(output) print(output)
} }
@ -104,9 +102,8 @@ test_that("dart prediction works", {
rnorm(100) rnorm(100)
set.seed(1994) set.seed(1994)
booster_by_xgboost <- xgboost( booster_by_xgboost <- xgb.train(
data = d, data = xgb.DMatrix(d, label = y),
label = y,
max_depth = 2, max_depth = 2,
booster = "dart", booster = "dart",
rate_drop = 0.5, rate_drop = 0.5,
@ -116,15 +113,15 @@ test_that("dart prediction works", {
nrounds = nrounds, nrounds = nrounds,
objective = "reg:squarederror" objective = "reg:squarederror"
) )
pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0) pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, iterationrange = NULL)
pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds) pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, iterationrange = c(1, nrounds))
expect_true(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_1, byrow = TRUE))) expect_true(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_1, byrow = TRUE)))
pred_by_xgboost_2 <- predict(booster_by_xgboost, newdata = d, training = TRUE) pred_by_xgboost_2 <- predict(booster_by_xgboost, newdata = d, training = TRUE)
expect_false(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_2, byrow = TRUE))) expect_false(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_2, byrow = TRUE)))
set.seed(1994) set.seed(1994)
dtrain <- xgb.DMatrix(data = d, info = list(label = y), nthread = n_threads) dtrain <- xgb.DMatrix(data = d, label = y, nthread = n_threads)
booster_by_train <- xgb.train( booster_by_train <- xgb.train(
params = list( params = list(
booster = "dart", booster = "dart",
@ -138,8 +135,8 @@ test_that("dart prediction works", {
data = dtrain, data = dtrain,
nrounds = nrounds nrounds = nrounds
) )
pred_by_train_0 <- predict(booster_by_train, newdata = dtrain, ntreelimit = 0) pred_by_train_0 <- predict(booster_by_train, newdata = dtrain, iterationrange = NULL)
pred_by_train_1 <- predict(booster_by_train, newdata = dtrain, ntreelimit = nrounds) pred_by_train_1 <- predict(booster_by_train, newdata = dtrain, iterationrange = c(1, nrounds))
pred_by_train_2 <- predict(booster_by_train, newdata = dtrain, training = TRUE) pred_by_train_2 <- predict(booster_by_train, newdata = dtrain, training = TRUE)
expect_true(all(matrix(pred_by_train_0, byrow = TRUE) == matrix(pred_by_xgboost_0, byrow = TRUE))) expect_true(all(matrix(pred_by_train_0, byrow = TRUE) == matrix(pred_by_xgboost_0, byrow = TRUE)))
@ -151,16 +148,17 @@ test_that("train and predict softprob", {
lb <- as.numeric(iris$Species) - 1 lb <- as.numeric(iris$Species) - 1
set.seed(11) set.seed(11)
expect_output( expect_output(
bst <- xgboost( bst <- xgb.train(
data = as.matrix(iris[, -5]), label = lb, data = xgb.DMatrix(as.matrix(iris[, -5]), label = lb),
max_depth = 3, eta = 0.5, nthread = n_threads, nrounds = 5, max_depth = 3, eta = 0.5, nthread = n_threads, nrounds = 5,
objective = "multi:softprob", num_class = 3, eval_metric = "merror" objective = "multi:softprob", num_class = 3, eval_metric = "merror",
watchlist = list(train = xgb.DMatrix(as.matrix(iris[, -5]), label = lb))
), ),
"train-merror" "train-merror"
) )
expect_false(is.null(bst$evaluation_log)) expect_false(is.null(attributes(bst)$evaluation_log))
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025) expect_lt(attributes(bst)$evaluation_log[, min(train_merror)], 0.025)
expect_equal(bst$niter * 3, xgb.ntree(bst)) expect_equal(xgb.get.num.boosted.rounds(bst), 5)
pred <- predict(bst, as.matrix(iris[, -5])) pred <- predict(bst, as.matrix(iris[, -5]))
expect_length(pred, nrow(iris) * 3) expect_length(pred, nrow(iris) * 3)
# row sums add up to total probability of 1: # row sums add up to total probability of 1:
@ -170,14 +168,14 @@ test_that("train and predict softprob", {
expect_equal(as.numeric(t(mpred)), pred) expect_equal(as.numeric(t(mpred)), pred)
pred_labels <- max.col(mpred) - 1 pred_labels <- max.col(mpred) - 1
err <- sum(pred_labels != lb) / length(lb) err <- sum(pred_labels != lb) / length(lb)
expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6) expect_equal(attributes(bst)$evaluation_log[5, train_merror], err, tolerance = 5e-6)
# manually calculate error at the 1st iteration: # manually calculate error at the 1st iteration:
mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, ntreelimit = 1) mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, iterationrange = c(1, 1))
pred_labels <- max.col(mpred) - 1 pred_labels <- max.col(mpred) - 1
err <- sum(pred_labels != lb) / length(lb) err <- sum(pred_labels != lb) / length(lb)
expect_equal(bst$evaluation_log[1, train_merror], err, tolerance = 5e-6) expect_equal(attributes(bst)$evaluation_log[1, train_merror], err, tolerance = 5e-6)
mpred1 <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, iterationrange = c(1, 2)) mpred1 <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, iterationrange = c(1, 1))
expect_equal(mpred, mpred1) expect_equal(mpred, mpred1)
d <- cbind( d <- cbind(
@ -186,7 +184,7 @@ test_that("train and predict softprob", {
x3 = rnorm(100) x3 = rnorm(100)
) )
y <- sample.int(10, 100, replace = TRUE) - 1 y <- sample.int(10, 100, replace = TRUE) - 1
dtrain <- xgb.DMatrix(data = d, info = list(label = y), nthread = n_threads) dtrain <- xgb.DMatrix(data = d, label = y, nthread = n_threads)
booster <- xgb.train( booster <- xgb.train(
params = list(tree_method = "hist", nthread = n_threads), params = list(tree_method = "hist", nthread = n_threads),
data = dtrain, nrounds = 4, num_class = 10, data = dtrain, nrounds = 4, num_class = 10,
@ -201,97 +199,97 @@ test_that("train and predict softmax", {
lb <- as.numeric(iris$Species) - 1 lb <- as.numeric(iris$Species) - 1
set.seed(11) set.seed(11)
expect_output( expect_output(
bst <- xgboost( bst <- xgb.train(
data = as.matrix(iris[, -5]), label = lb, data = xgb.DMatrix(as.matrix(iris[, -5]), label = lb),
max_depth = 3, eta = 0.5, nthread = n_threads, nrounds = 5, max_depth = 3, eta = 0.5, nthread = n_threads, nrounds = 5,
objective = "multi:softmax", num_class = 3, eval_metric = "merror" objective = "multi:softmax", num_class = 3, eval_metric = "merror",
watchlist = list(train = xgb.DMatrix(as.matrix(iris[, -5]), label = lb))
), ),
"train-merror" "train-merror"
) )
expect_false(is.null(bst$evaluation_log)) expect_false(is.null(attributes(bst)$evaluation_log))
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025) expect_lt(attributes(bst)$evaluation_log[, min(train_merror)], 0.025)
expect_equal(bst$niter * 3, xgb.ntree(bst)) expect_equal(xgb.get.num.boosted.rounds(bst), 5)
pred <- predict(bst, as.matrix(iris[, -5])) pred <- predict(bst, as.matrix(iris[, -5]))
expect_length(pred, nrow(iris)) expect_length(pred, nrow(iris))
err <- sum(pred != lb) / length(lb) err <- sum(pred != lb) / length(lb)
expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6) expect_equal(attributes(bst)$evaluation_log[5, train_merror], err, tolerance = 5e-6)
}) })
test_that("train and predict RF", { test_that("train and predict RF", {
set.seed(11) set.seed(11)
lb <- train$label lb <- train$label
# single iteration # single iteration
bst <- xgboost( bst <- xgb.train(
data = train$data, label = lb, max_depth = 5, data = xgb.DMatrix(train$data, label = lb), max_depth = 5,
nthread = n_threads, nthread = n_threads,
nrounds = 1, objective = "binary:logistic", eval_metric = "error", nrounds = 1, objective = "binary:logistic", eval_metric = "error",
num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1 num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1,
watchlist = list(train = xgb.DMatrix(train$data, label = lb))
) )
expect_equal(bst$niter, 1) expect_equal(xgb.get.num.boosted.rounds(bst), 1)
expect_equal(xgb.ntree(bst), 20)
pred <- predict(bst, train$data) pred <- predict(bst, train$data)
pred_err <- sum((pred > 0.5) != lb) / length(lb) pred_err <- sum((pred > 0.5) != lb) / length(lb)
expect_lt(abs(bst$evaluation_log[1, train_error] - pred_err), 10e-6) expect_lt(abs(attributes(bst)$evaluation_log[1, train_error] - pred_err), 10e-6)
# expect_lt(pred_err, 0.03) # expect_lt(pred_err, 0.03)
pred <- predict(bst, train$data, ntreelimit = 20) pred <- predict(bst, train$data, iterationrange = c(1, 1))
pred_err_20 <- sum((pred > 0.5) != lb) / length(lb) pred_err_20 <- sum((pred > 0.5) != lb) / length(lb)
expect_equal(pred_err_20, pred_err) expect_equal(pred_err_20, pred_err)
pred1 <- predict(bst, train$data, iterationrange = c(1, 2))
expect_equal(pred, pred1)
}) })
test_that("train and predict RF with softprob", { test_that("train and predict RF with softprob", {
lb <- as.numeric(iris$Species) - 1 lb <- as.numeric(iris$Species) - 1
nrounds <- 15 nrounds <- 15
set.seed(11) set.seed(11)
bst <- xgboost( bst <- xgb.train(
data = as.matrix(iris[, -5]), label = lb, data = xgb.DMatrix(as.matrix(iris[, -5]), label = lb),
max_depth = 3, eta = 0.9, nthread = n_threads, nrounds = nrounds, max_depth = 3, eta = 0.9, nthread = n_threads, nrounds = nrounds,
objective = "multi:softprob", eval_metric = "merror", objective = "multi:softprob", eval_metric = "merror",
num_class = 3, verbose = 0, num_class = 3, verbose = 0,
num_parallel_tree = 4, subsample = 0.5, colsample_bytree = 0.5 num_parallel_tree = 4, subsample = 0.5, colsample_bytree = 0.5,
watchlist = list(train = xgb.DMatrix(as.matrix(iris[, -5]), label = lb))
) )
expect_equal(bst$niter, 15) expect_equal(xgb.get.num.boosted.rounds(bst), 15)
expect_equal(xgb.ntree(bst), 15 * 3 * 4)
# predict for all iterations: # predict for all iterations:
pred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE) pred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE)
expect_equal(dim(pred), c(nrow(iris), 3)) expect_equal(dim(pred), c(nrow(iris), 3))
pred_labels <- max.col(pred) - 1 pred_labels <- max.col(pred) - 1
err <- sum(pred_labels != lb) / length(lb) err <- sum(pred_labels != lb) / length(lb)
expect_equal(bst$evaluation_log[nrounds, train_merror], err, tolerance = 5e-6) expect_equal(attributes(bst)$evaluation_log[nrounds, train_merror], err, tolerance = 5e-6)
# predict for 7 iterations and adjust for 4 parallel trees per iteration # predict for 7 iterations and adjust for 4 parallel trees per iteration
pred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, ntreelimit = 7 * 4) pred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, iterationrange = c(1, 7))
err <- sum((max.col(pred) - 1) != lb) / length(lb) err <- sum((max.col(pred) - 1) != lb) / length(lb)
expect_equal(bst$evaluation_log[7, train_merror], err, tolerance = 5e-6) expect_equal(attributes(bst)$evaluation_log[7, train_merror], err, tolerance = 5e-6)
}) })
test_that("use of multiple eval metrics works", { test_that("use of multiple eval metrics works", {
expect_output( expect_output(
bst <- xgboost( bst <- xgb.train(
data = train$data, label = train$label, max_depth = 2, data = xgb.DMatrix(train$data, label = train$label), max_depth = 2,
eta = 1, nthread = n_threads, nrounds = 2, objective = "binary:logistic", eta = 1, nthread = n_threads, nrounds = 2, objective = "binary:logistic",
eval_metric = "error", eval_metric = "auc", eval_metric = "logloss" eval_metric = "error", eval_metric = "auc", eval_metric = "logloss",
watchlist = list(train = xgb.DMatrix(train$data, label = train$label))
), ),
"train-error.*train-auc.*train-logloss" "train-error.*train-auc.*train-logloss"
) )
expect_false(is.null(bst$evaluation_log)) expect_false(is.null(attributes(bst)$evaluation_log))
expect_equal(dim(bst$evaluation_log), c(2, 4)) expect_equal(dim(attributes(bst)$evaluation_log), c(2, 4))
expect_equal(colnames(bst$evaluation_log), c("iter", "train_error", "train_auc", "train_logloss")) expect_equal(colnames(attributes(bst)$evaluation_log), c("iter", "train_error", "train_auc", "train_logloss"))
expect_output( expect_output(
bst2 <- xgboost( bst2 <- xgb.train(
data = train$data, label = train$label, max_depth = 2, data = xgb.DMatrix(train$data, label = train$label), max_depth = 2,
eta = 1, nthread = n_threads, nrounds = 2, objective = "binary:logistic", eta = 1, nthread = n_threads, nrounds = 2, objective = "binary:logistic",
eval_metric = list("error", "auc", "logloss") eval_metric = list("error", "auc", "logloss"),
watchlist = list(train = xgb.DMatrix(train$data, label = train$label))
), ),
"train-error.*train-auc.*train-logloss" "train-error.*train-auc.*train-logloss"
) )
expect_false(is.null(bst2$evaluation_log)) expect_false(is.null(attributes(bst2)$evaluation_log))
expect_equal(dim(bst2$evaluation_log), c(2, 4)) expect_equal(dim(attributes(bst2)$evaluation_log), c(2, 4))
expect_equal(colnames(bst2$evaluation_log), c("iter", "train_error", "train_auc", "train_logloss")) expect_equal(colnames(attributes(bst2)$evaluation_log), c("iter", "train_error", "train_auc", "train_logloss"))
}) })
@ -311,41 +309,25 @@ test_that("training continuation works", {
# continue for two more: # continue for two more:
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = bst1) bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = bst1)
if (!windows_flag && !solaris_flag) { if (!windows_flag && !solaris_flag) {
expect_equal(bst$raw, bst2$raw) expect_equal(xgb.save.raw(bst), xgb.save.raw(bst2))
} }
expect_false(is.null(bst2$evaluation_log)) expect_false(is.null(attributes(bst2)$evaluation_log))
expect_equal(dim(bst2$evaluation_log), c(4, 2)) expect_equal(dim(attributes(bst2)$evaluation_log), c(4, 2))
expect_equal(bst2$evaluation_log, bst$evaluation_log) expect_equal(attributes(bst2)$evaluation_log, attributes(bst)$evaluation_log)
# test continuing from raw model data # test continuing from raw model data
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = bst1$raw) bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = xgb.save.raw(bst1))
if (!windows_flag && !solaris_flag) { if (!windows_flag && !solaris_flag) {
expect_equal(bst$raw, bst2$raw) expect_equal(xgb.save.raw(bst), xgb.save.raw(bst2))
} }
expect_equal(dim(bst2$evaluation_log), c(2, 2)) expect_equal(dim(attributes(bst2)$evaluation_log), c(2, 2))
# test continuing from a model in file # test continuing from a model in file
xgb.save(bst1, "xgboost.json") fname <- file.path(tempdir(), "xgboost.json")
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = "xgboost.json") xgb.save(bst1, fname)
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = fname)
if (!windows_flag && !solaris_flag) { if (!windows_flag && !solaris_flag) {
expect_equal(bst$raw, bst2$raw) expect_equal(xgb.save.raw(bst), xgb.save.raw(bst2))
} }
expect_equal(dim(bst2$evaluation_log), c(2, 2)) expect_equal(dim(attributes(bst2)$evaluation_log), c(2, 2))
file.remove("xgboost.json")
})
test_that("model serialization works", {
out_path <- "model_serialization"
dtrain <- xgb.DMatrix(train$data, label = train$label, nthread = n_threads)
watchlist <- list(train = dtrain)
param <- list(objective = "binary:logistic", nthread = n_threads)
booster <- xgb.train(param, dtrain, nrounds = 4, watchlist)
raw <- xgb.serialize(booster)
saveRDS(raw, out_path)
raw <- readRDS(out_path)
loaded <- xgb.unserialize(raw)
raw_from_loaded <- xgb.serialize(loaded)
expect_equal(raw, raw_from_loaded)
file.remove(out_path)
}) })
test_that("xgb.cv works", { test_that("xgb.cv works", {
@ -361,7 +343,7 @@ test_that("xgb.cv works", {
expect_is(cv, "xgb.cv.synchronous") expect_is(cv, "xgb.cv.synchronous")
expect_false(is.null(cv$evaluation_log)) expect_false(is.null(cv$evaluation_log))
expect_lt(cv$evaluation_log[, min(test_error_mean)], 0.03) expect_lt(cv$evaluation_log[, min(test_error_mean)], 0.03)
expect_lt(cv$evaluation_log[, min(test_error_std)], 0.008) expect_lt(cv$evaluation_log[, min(test_error_std)], 0.0085)
expect_equal(cv$niter, 2) expect_equal(cv$niter, 2)
expect_false(is.null(cv$folds) && is.list(cv$folds)) expect_false(is.null(cv$folds) && is.list(cv$folds))
expect_length(cv$folds, 5) expect_length(cv$folds, 5)
@ -391,8 +373,8 @@ test_that("xgb.cv works with stratified folds", {
test_that("train and predict with non-strict classes", { test_that("train and predict with non-strict classes", {
# standard dense matrix input # standard dense matrix input
train_dense <- as.matrix(train$data) train_dense <- as.matrix(train$data)
bst <- xgboost( bst <- xgb.train(
data = train_dense, label = train$label, max_depth = 2, data = xgb.DMatrix(train_dense, label = train$label), max_depth = 2,
eta = 1, nthread = n_threads, nrounds = 2, objective = "binary:logistic", eta = 1, nthread = n_threads, nrounds = 2, objective = "binary:logistic",
verbose = 0 verbose = 0
) )
@ -402,8 +384,8 @@ test_that("train and predict with non-strict classes", {
class(train_dense) <- "shmatrix" class(train_dense) <- "shmatrix"
expect_true(is.matrix(train_dense)) expect_true(is.matrix(train_dense))
expect_error( expect_error(
bst <- xgboost( bst <- xgb.train(
data = train_dense, label = train$label, max_depth = 2, data = xgb.DMatrix(train_dense, label = train$label), max_depth = 2,
eta = 1, nthread = n_threads, nrounds = 2, objective = "binary:logistic", eta = 1, nthread = n_threads, nrounds = 2, objective = "binary:logistic",
verbose = 0 verbose = 0
), ),
@ -416,8 +398,8 @@ test_that("train and predict with non-strict classes", {
class(train_dense) <- c("pphmatrix", "shmatrix") class(train_dense) <- c("pphmatrix", "shmatrix")
expect_true(is.matrix(train_dense)) expect_true(is.matrix(train_dense))
expect_error( expect_error(
bst <- xgboost( bst <- xgb.train(
data = train_dense, label = train$label, max_depth = 2, data = xgb.DMatrix(train_dense, label = train$label), max_depth = 2,
eta = 1, nthread = n_threads, nrounds = 2, objective = "binary:logistic", eta = 1, nthread = n_threads, nrounds = 2, objective = "binary:logistic",
verbose = 0 verbose = 0
), ),
@ -448,8 +430,8 @@ test_that("max_delta_step works", {
# model with restricted max_delta_step # model with restricted max_delta_step
bst2 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1, max_delta_step = 1) bst2 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1, max_delta_step = 1)
# the no-restriction model is expected to have consistently lower loss during the initial iterations # the no-restriction model is expected to have consistently lower loss during the initial iterations
expect_true(all(bst1$evaluation_log$train_logloss < bst2$evaluation_log$train_logloss)) expect_true(all(attributes(bst1)$evaluation_log$train_logloss < attributes(bst2)$evaluation_log$train_logloss))
expect_lt(mean(bst1$evaluation_log$train_logloss) / mean(bst2$evaluation_log$train_logloss), 0.8) expect_lt(mean(attributes(bst1)$evaluation_log$train_logloss) / mean(attributes(bst2)$evaluation_log$train_logloss), 0.8)
}) })
test_that("colsample_bytree works", { test_that("colsample_bytree works", {
@ -480,8 +462,8 @@ test_that("colsample_bytree works", {
}) })
test_that("Configuration works", { test_that("Configuration works", {
bst <- xgboost( bst <- xgb.train(
data = train$data, label = train$label, max_depth = 2, data = xgb.DMatrix(train$data, label = train$label), max_depth = 2,
eta = 1, nthread = n_threads, nrounds = 2, objective = "binary:logistic", eta = 1, nthread = n_threads, nrounds = 2, objective = "binary:logistic",
eval_metric = "error", eval_metric = "auc", eval_metric = "logloss" eval_metric = "error", eval_metric = "auc", eval_metric = "logloss"
) )
@ -521,8 +503,8 @@ test_that("strict_shape works", {
y <- as.numeric(iris$Species) - 1 y <- as.numeric(iris$Species) - 1
X <- as.matrix(iris[, -5]) X <- as.matrix(iris[, -5])
bst <- xgboost( bst <- xgb.train(
data = X, label = y, data = xgb.DMatrix(X, label = y),
max_depth = 2, nrounds = n_rounds, nthread = n_threads, max_depth = 2, nrounds = n_rounds, nthread = n_threads,
objective = "multi:softprob", num_class = 3, eval_metric = "merror" objective = "multi:softprob", num_class = 3, eval_metric = "merror"
) )
@ -536,8 +518,8 @@ test_that("strict_shape works", {
X <- agaricus.train$data X <- agaricus.train$data
y <- agaricus.train$label y <- agaricus.train$label
bst <- xgboost( bst <- xgb.train(
data = X, label = y, max_depth = 2, nthread = n_threads, data = xgb.DMatrix(X, label = y), max_depth = 2, nthread = n_threads,
nrounds = n_rounds, objective = "binary:logistic", nrounds = n_rounds, objective = "binary:logistic",
eval_metric = "error", eval_metric = "auc", eval_metric = "logloss" eval_metric = "error", eval_metric = "auc", eval_metric = "logloss"
) )
@ -555,8 +537,8 @@ test_that("'predict' accepts CSR data", {
x_csc <- as(X[1L, , drop = FALSE], "CsparseMatrix") x_csc <- as(X[1L, , drop = FALSE], "CsparseMatrix")
x_csr <- as(x_csc, "RsparseMatrix") x_csr <- as(x_csc, "RsparseMatrix")
x_spv <- as(x_csc, "sparseVector") x_spv <- as(x_csc, "sparseVector")
bst <- xgboost( bst <- xgb.train(
data = X, label = y, objective = "binary:logistic", data = xgb.DMatrix(X, label = y), objective = "binary:logistic",
nrounds = 5L, verbose = FALSE, nthread = n_threads, nrounds = 5L, verbose = FALSE, nthread = n_threads,
) )
p_csc <- predict(bst, x_csc) p_csc <- predict(bst, x_csc)
@ -565,3 +547,234 @@ test_that("'predict' accepts CSR data", {
expect_equal(p_csc, p_csr) expect_equal(p_csc, p_csr)
expect_equal(p_csc, p_spv) expect_equal(p_csc, p_spv)
}) })
test_that("Quantile regression accepts multiple quantiles", {
  # Fit one model for three quantile levels at once and check that the
  # prediction columns are ordered and that the median tracks the target best.
  data(mtcars)
  target <- mtcars[, 1]
  features <- as.matrix(mtcars[, -1])
  dtrain <- xgb.DMatrix(data = features, label = target)
  params <- list(
    objective = "reg:quantileerror",
    tree_method = "exact",
    quantile_alpha = c(0.05, 0.5, 0.95),
    nthread = n_threads
  )
  model <- xgb.train(data = dtrain, params = params, nrounds = 15)
  pred <- predict(model, features, reshape = TRUE)
  # one prediction column per requested quantile
  expect_equal(dim(pred)[1], nrow(features))
  expect_equal(dim(pred)[2], 3)
  # the lower quantile never exceeds the upper quantile
  expect_true(all(pred[, 1] <= pred[, 3]))
  # the median (middle column) should correlate with the target most strongly
  quantile_cors <- cor(target, pred)
  expect_true(quantile_cors[2] > quantile_cors[1])
  expect_true(quantile_cors[2] > quantile_cors[3])
  expect_true(quantile_cors[2] > 0.85)
})
test_that("Can use multi-output labels with built-in objectives", {
  # Train on a two-column target whose second column is the negation of
  # the first; a multi-output tree should mirror its predictions exactly.
  data("mtcars")
  target <- mtcars$mpg
  features <- as.matrix(mtcars[, -1])
  mirrored <- cbind(target, -target)
  dtrain <- xgb.DMatrix(features, label = mirrored, nthread = n_threads)
  params <- list(
    tree_method = "hist",
    multi_strategy = "multi_output_tree",
    objective = "reg:squarederror",
    nthread = n_threads
  )
  model <- xgb.train(params = params, data = dtrain, nrounds = 5)
  pred <- predict(model, features, reshape = TRUE)
  # the two outputs are exact mirrors of one another
  expect_equal(pred[, 1], -pred[, 2])
  expect_true(cor(target, pred[, 1]) > 0.9)
  expect_true(cor(target, pred[, 2]) < -0.9)
})
test_that("Can use multi-output labels with custom objectives", {
  # Same mirrored-target setup as the built-in objective test, but with a
  # hand-written squared-error objective returning matrix-valued grad/hess.
  data("mtcars")
  target <- mtcars$mpg
  features <- as.matrix(mtcars[, -1])
  mirrored <- cbind(target, -target)
  dtrain <- xgb.DMatrix(features, label = mirrored, nthread = n_threads)
  # Squared error by hand: gradient is the residual, hessian is a matrix of
  # ones matching the (rows x outputs) shape of the gradient.
  squared_error <- function(pred, dtrain) {
    labels <- getinfo(dtrain, "label")
    grad <- pred - labels
    hess <- matrix(1, nrow = nrow(grad), ncol = ncol(grad))
    return(list(grad = grad, hess = hess))
  }
  model <- xgb.train(
    params = list(
      tree_method = "hist",
      multi_strategy = "multi_output_tree",
      base_score = 0,
      objective = squared_error,
      nthread = n_threads
    ),
    data = dtrain,
    nrounds = 5
  )
  pred <- predict(model, features, reshape = TRUE)
  expect_equal(pred[, 1], -pred[, 2])
  expect_true(cor(target, pred[, 1]) > 0.9)
  expect_true(cor(target, pred[, 2]) < -0.9)
})
test_that("Can use ranking objectives with either 'qid' or 'group'", {
  # The same query structure expressed as a per-row 'qid' vector or as
  # 'group' sizes must yield identical models given identical seeds.
  set.seed(123)
  features <- matrix(rnorm(100 * 10), nrow = 100)
  relevance <- sample(2, size = 100, replace = TRUE) - 1
  query_ids <- c(rep(1, 20), rep(2, 20), rep(3, 60))
  group_sizes <- c(20, 20, 60)
  dmat_qid <- xgb.DMatrix(features, label = relevance, qid = query_ids)
  dmat_gr <- xgb.DMatrix(features, label = relevance, group = group_sizes)
  params <- list(
    tree_method = "hist",
    lambdarank_num_pair_per_sample = 8,
    objective = "rank:ndcg",
    lambdarank_pair_method = "topk",
    nthread = n_threads
  )
  # re-seed before each fit so any sampling inside training matches
  set.seed(123)
  model_qid <- xgb.train(params, dmat_qid, nrounds = 5)
  set.seed(123)
  model_gr <- xgb.train(params, dmat_gr, nrounds = 5)
  pred_qid <- predict(model_qid, features)
  pred_gr <- predict(model_gr, features)
  expect_equal(pred_qid, pred_gr)
})
test_that("Coefficients from gblinear have the expected shape and names", {
  # Single-output model: coef() should return a named vector with an
  # intercept, and manual matrix multiplication should reproduce predict().
  data(mtcars)
  target <- mtcars$mpg
  features <- as.matrix(mtcars[, -1])
  design <- model.matrix(~., data = mtcars[, -1])
  dtrain <- xgb.DMatrix(features, label = target, nthread = 1)
  model <- xgb.train(
    data = dtrain,
    params = list(
      booster = "gblinear",
      nthread = 1
    ),
    nrounds = 3
  )
  coefs <- coef(model)
  expect_equal(length(coefs), ncol(features) + 1)
  expect_equal(names(coefs), c("(Intercept)", colnames(features)))
  pred_auto <- predict(model, features)
  pred_manual <- as.numeric(design %*% coefs)
  expect_equal(pred_manual, pred_auto, tolerance = 1e-5)
  # Multi-class model: coef() should return a (features + intercept) x class
  # matrix, and the margins should match the manual linear predictor.
  data(iris)
  target <- as.numeric(iris$Species) - 1
  features <- as.matrix(iris[, -5])
  dtrain <- xgb.DMatrix(features, label = target, nthread = 1)
  design <- model.matrix(~., data = iris[, -5])
  model <- xgb.train(
    data = dtrain,
    params = list(
      booster = "gblinear",
      objective = "multi:softprob",
      num_class = 3,
      nthread = 1
    ),
    nrounds = 3
  )
  coefs <- coef(model)
  expect_equal(nrow(coefs), ncol(features) + 1)
  expect_equal(ncol(coefs), 3)
  expect_equal(row.names(coefs), c("(Intercept)", colnames(features)))
  pred_auto <- predict(model, features, outputmargin = TRUE, reshape = TRUE)
  pred_manual <- unname(design %*% coefs)
  expect_equal(pred_manual, pred_auto, tolerance = 1e-7)
})
test_that("Deep copies work as expected", {
  data(mtcars)
  label <- mtcars$mpg
  features <- mtcars[, -1]
  dtrain <- xgb.DMatrix(features, label = label, nthread = 1)
  model <- xgb.train(
    data = dtrain,
    params = list(nthread = 1),
    nrounds = 3
  )
  xgb.attr(model, "my_attr") <- 100
  # A plain R assignment shares the underlying booster handle, so writing an
  # attribute through one reference is visible through the other.
  shallow <- model
  xgb.attr(shallow, "my_attr") <- 333
  attr_via_model <- xgb.attr(model, "my_attr")
  attr_via_shallow <- xgb.attr(shallow, "my_attr")
  expect_equal(attr_via_model, attr_via_shallow)
  # xgb.copy.Booster creates an independent handle; mutations no longer
  # propagate back to the original model.
  deep <- xgb.copy.Booster(model)
  xgb.attr(deep, "my_attr") <- 444
  attr_via_model <- xgb.attr(model, "my_attr")
  attr_via_deep <- xgb.attr(deep, "my_attr")
  expect_false(attr_via_model == attr_via_deep)
})
test_that("Pointer comparison works as expected", {
  library(xgboost)
  label <- mtcars$mpg
  features <- as.matrix(mtcars[, -1])
  model <- xgb.train(
    params = list(nthread = 1),
    data = xgb.DMatrix(features, label = label, nthread = 1),
    nrounds = 3
  )
  # A shallow copy aliases the same C-level booster handle ...
  aliased <- model
  expect_true(xgb.is.same.Booster(model, aliased))
  # ... while a deep copy gets a handle of its own.
  duplicate <- xgb.copy.Booster(model)
  expect_false(xgb.is.same.Booster(model, duplicate))
  # Attribute writes travel through the alias but not through the deep copy.
  xgb.attr(aliased, "my_attr") <- 111
  expect_equal(xgb.attr(model, "my_attr"), "111")
  expect_null(xgb.attr(duplicate, "my_attr"))
})
test_that("DMatrix field are set to booster when training", {
  # Feature names / feature types attached to the training DMatrix must be
  # carried over onto the fitted booster (and only the ones actually set).
  set.seed(123)
  y <- rnorm(100)
  x <- matrix(rnorm(100 * 3), nrow = 100)
  x[, 2] <- abs(as.integer(x[, 2]))
  dm_unnamed <- xgb.DMatrix(x, label = y, nthread = 1)
  dm_feature_names <- xgb.DMatrix(x, label = y, feature_names = c("a", "b", "c"), nthread = 1)
  # Fix: pass nthread = 1 here as well, consistently with the other three
  # DMatrix constructions in this test.
  dm_feature_types <- xgb.DMatrix(x, label = y, nthread = 1)
  setinfo(dm_feature_types, "feature_type", c("q", "c", "q"))
  dm_both <- xgb.DMatrix(x, label = y, feature_names = c("a", "b", "c"), nthread = 1)
  setinfo(dm_both, "feature_type", c("q", "c", "q"))
  params <- list(nthread = 1)
  model_unnamed <- xgb.train(data = dm_unnamed, params = params, nrounds = 3)
  model_feature_names <- xgb.train(data = dm_feature_names, params = params, nrounds = 3)
  model_feature_types <- xgb.train(data = dm_feature_types, params = params, nrounds = 3)
  model_both <- xgb.train(data = dm_both, params = params, nrounds = 3)
  # feature names propagate only from the DMatrix objects that had them
  expect_null(getinfo(model_unnamed, "feature_name"))
  expect_equal(getinfo(model_feature_names, "feature_name"), c("a", "b", "c"))
  expect_null(getinfo(model_feature_types, "feature_name"))
  expect_equal(getinfo(model_both, "feature_name"), c("a", "b", "c"))
  # variable.names() mirrors the feature-name info
  expect_null(variable.names(model_unnamed))
  expect_equal(variable.names(model_feature_names), c("a", "b", "c"))
  expect_null(variable.names(model_feature_types))
  expect_equal(variable.names(model_both), c("a", "b", "c"))
  # feature types propagate independently of names
  expect_null(getinfo(model_unnamed, "feature_type"))
  expect_null(getinfo(model_feature_names, "feature_type"))
  expect_equal(getinfo(model_feature_types, "feature_type"), c("q", "c", "q"))
  expect_equal(getinfo(model_both, "feature_type"), c("q", "c", "q"))
})

View File

@ -0,0 +1,67 @@
context("testing xgb.Booster slicing")
# Shared fixtures for all slicing tests below: `model` is a 20-round booster
# and `pred` holds its per-tree leaf assignments, so column j of `pred`
# corresponds to tree j.  These names are referenced by every test_that
# block in this file — do not rename them.
data(agaricus.train, package = "xgboost")
dm <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label, nthread = 1)
# Note: here need large step sizes in order for the predictions
# to have substantially different leaf assignments on each tree
model <- xgb.train(
params = list(objective = "binary:logistic", nthread = 1, max_depth = 4, eta = 0.5),
data = dm,
nrounds = 20
)
# predleaf = TRUE yields one leaf-index column per tree (n_rows x 20)
pred <- predict(model, dm, predleaf = TRUE, reshape = TRUE)
test_that("Slicing full model", {
  # Slicing over the entire tree range must be a no-op in every spelling.
  sliced <- xgb.slice.Booster(model, 1, 0)
  expect_equal(xgb.save.raw(sliced), xgb.save.raw(model))
  sliced <- model[]
  expect_equal(xgb.save.raw(sliced), xgb.save.raw(model))
  sliced <- model[seq_len(length(model))]
  expect_equal(xgb.save.raw(sliced), xgb.save.raw(model))
})
test_that("Slicing sequence from start", {
  # Keeping the first ten rounds should reproduce the first ten leaf columns.
  kept <- seq(1, 10)
  sliced <- xgb.slice.Booster(model, 1, 10)
  expect_equal(predict(sliced, dm, predleaf = TRUE, reshape = TRUE), pred[, kept])
  sliced <- model[1:10]
  expect_equal(predict(sliced, dm, predleaf = TRUE, reshape = TRUE), pred[, kept])
})
test_that("Slicing sequence from middle", {
  # A mid-range slice must match the corresponding leaf columns.
  kept <- seq(5, 10)
  sliced <- xgb.slice.Booster(model, 5, 10)
  expect_equal(predict(sliced, dm, predleaf = TRUE, reshape = TRUE), pred[, kept])
  sliced <- model[5:10]
  expect_equal(predict(sliced, dm, predleaf = TRUE, reshape = TRUE), pred[, kept])
})
test_that("Slicing with non-unit step", {
  # Strided slices starting at the first tree, for several stride widths.
  for (step in 2:5) {
    kept <- seq(1, 17, step)
    sliced <- xgb.slice.Booster(model, 1, 17, step)
    expect_equal(predict(sliced, dm, predleaf = TRUE, reshape = TRUE), pred[, kept])
    sliced <- model[kept]
    expect_equal(predict(sliced, dm, predleaf = TRUE, reshape = TRUE), pred[, kept])
  }
})
test_that("Slicing with non-unit step from middle", {
  # Strided slices starting mid-sequence, for several stride widths.
  for (step in 2:5) {
    kept <- seq(4, 17, step)
    sliced <- xgb.slice.Booster(model, 4, 17, step)
    expect_equal(predict(sliced, dm, predleaf = TRUE, reshape = TRUE), pred[, kept])
    sliced <- model[kept]
    expect_equal(predict(sliced, dm, predleaf = TRUE, reshape = TRUE), pred[, kept])
  }
})

View File

@ -57,7 +57,7 @@ test_that("cb.print.evaluation works as expected", {
expect_output(f5(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000") expect_output(f5(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
bst_evaluation_err <- c('train-auc' = 0.1, 'test-auc' = 0.2) bst_evaluation_err <- c('train-auc' = 0.1, 'test-auc' = 0.2)
expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\\+0.100000\ttest-auc:0.800000\\+0.200000") expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000±0.100000\ttest-auc:0.800000±0.200000")
}) })
test_that("cb.evaluation.log works as expected", { test_that("cb.evaluation.log works as expected", {
@ -111,9 +111,9 @@ test_that("can store evaluation_log without printing", {
expect_silent( expect_silent(
bst <- xgb.train(param, dtrain, nrounds = 10, watchlist, eta = 1, verbose = 0) bst <- xgb.train(param, dtrain, nrounds = 10, watchlist, eta = 1, verbose = 0)
) )
expect_false(is.null(bst$evaluation_log)) expect_false(is.null(attributes(bst)$evaluation_log))
expect_false(is.null(bst$evaluation_log$train_error)) expect_false(is.null(attributes(bst)$evaluation_log$train_error))
expect_lt(bst$evaluation_log[, min(train_error)], 0.2) expect_lt(attributes(bst)$evaluation_log[, min(train_error)], 0.2)
}) })
test_that("cb.reset.parameters works as expected", { test_that("cb.reset.parameters works as expected", {
@ -121,34 +121,34 @@ test_that("cb.reset.parameters works as expected", {
# fixed eta # fixed eta
set.seed(111) set.seed(111)
bst0 <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 0.9, verbose = 0) bst0 <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 0.9, verbose = 0)
expect_false(is.null(bst0$evaluation_log)) expect_false(is.null(attributes(bst0)$evaluation_log))
expect_false(is.null(bst0$evaluation_log$train_error)) expect_false(is.null(attributes(bst0)$evaluation_log$train_error))
# same eta but re-set as a vector parameter in the callback # same eta but re-set as a vector parameter in the callback
set.seed(111) set.seed(111)
my_par <- list(eta = c(0.9, 0.9)) my_par <- list(eta = c(0.9, 0.9))
bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
callbacks = list(cb.reset.parameters(my_par))) callbacks = list(cb.reset.parameters(my_par)))
expect_false(is.null(bst1$evaluation_log$train_error)) expect_false(is.null(attributes(bst1)$evaluation_log$train_error))
expect_equal(bst0$evaluation_log$train_error, expect_equal(attributes(bst0)$evaluation_log$train_error,
bst1$evaluation_log$train_error) attributes(bst1)$evaluation_log$train_error)
# same eta but re-set via a function in the callback # same eta but re-set via a function in the callback
set.seed(111) set.seed(111)
my_par <- list(eta = function(itr, itr_end) 0.9) my_par <- list(eta = function(itr, itr_end) 0.9)
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
callbacks = list(cb.reset.parameters(my_par))) callbacks = list(cb.reset.parameters(my_par)))
expect_false(is.null(bst2$evaluation_log$train_error)) expect_false(is.null(attributes(bst2)$evaluation_log$train_error))
expect_equal(bst0$evaluation_log$train_error, expect_equal(attributes(bst0)$evaluation_log$train_error,
bst2$evaluation_log$train_error) attributes(bst2)$evaluation_log$train_error)
# different eta re-set as a vector parameter in the callback # different eta re-set as a vector parameter in the callback
set.seed(111) set.seed(111)
my_par <- list(eta = c(0.6, 0.5)) my_par <- list(eta = c(0.6, 0.5))
bst3 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, bst3 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
callbacks = list(cb.reset.parameters(my_par))) callbacks = list(cb.reset.parameters(my_par)))
expect_false(is.null(bst3$evaluation_log$train_error)) expect_false(is.null(attributes(bst3)$evaluation_log$train_error))
expect_false(all(bst0$evaluation_log$train_error == bst3$evaluation_log$train_error)) expect_false(all(attributes(bst0)$evaluation_log$train_error == attributes(bst3)$evaluation_log$train_error))
# resetting multiple parameters at the same time runs with no error # resetting multiple parameters at the same time runs with no error
my_par <- list(eta = c(1., 0.5), gamma = c(1, 2), max_depth = c(4, 8)) my_par <- list(eta = c(1., 0.5), gamma = c(1, 2), max_depth = c(4, 8))
@ -166,38 +166,39 @@ test_that("cb.reset.parameters works as expected", {
my_par <- list(eta = c(0., 0.)) my_par <- list(eta = c(0., 0.))
bstX <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, bstX <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
callbacks = list(cb.reset.parameters(my_par))) callbacks = list(cb.reset.parameters(my_par)))
expect_false(is.null(bstX$evaluation_log$train_error)) expect_false(is.null(attributes(bstX)$evaluation_log$train_error))
er <- unique(bstX$evaluation_log$train_error) er <- unique(attributes(bstX)$evaluation_log$train_error)
expect_length(er, 1) expect_length(er, 1)
expect_gt(er, 0.4) expect_gt(er, 0.4)
}) })
test_that("cb.save.model works as expected", { test_that("cb.save.model works as expected", {
files <- c('xgboost_01.json', 'xgboost_02.json', 'xgboost.json') files <- c('xgboost_01.json', 'xgboost_02.json', 'xgboost.json')
files <- unname(sapply(files, function(f) file.path(tempdir(), f)))
for (f in files) if (file.exists(f)) file.remove(f) for (f in files) if (file.exists(f)) file.remove(f)
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0, bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0,
save_period = 1, save_name = "xgboost_%02d.json") save_period = 1, save_name = file.path(tempdir(), "xgboost_%02d.json"))
expect_true(file.exists('xgboost_01.json')) expect_true(file.exists(files[1]))
expect_true(file.exists('xgboost_02.json')) expect_true(file.exists(files[2]))
b1 <- xgb.load('xgboost_01.json') b1 <- xgb.load(files[1])
xgb.parameters(b1) <- list(nthread = 2) xgb.parameters(b1) <- list(nthread = 2)
expect_equal(xgb.ntree(b1), 1) expect_equal(xgb.get.num.boosted.rounds(b1), 1)
b2 <- xgb.load('xgboost_02.json') b2 <- xgb.load(files[2])
xgb.parameters(b2) <- list(nthread = 2) xgb.parameters(b2) <- list(nthread = 2)
expect_equal(xgb.ntree(b2), 2) expect_equal(xgb.get.num.boosted.rounds(b2), 2)
xgb.config(b2) <- xgb.config(bst) xgb.config(b2) <- xgb.config(bst)
expect_equal(xgb.config(bst), xgb.config(b2)) expect_equal(xgb.config(bst), xgb.config(b2))
expect_equal(bst$raw, b2$raw) expect_equal(xgb.save.raw(bst), xgb.save.raw(b2))
# save_period = 0 saves the last iteration's model # save_period = 0 saves the last iteration's model
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0, bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0,
save_period = 0, save_name = 'xgboost.json') save_period = 0, save_name = file.path(tempdir(), 'xgboost.json'))
expect_true(file.exists('xgboost.json')) expect_true(file.exists(files[3]))
b2 <- xgb.load('xgboost.json') b2 <- xgb.load(files[3])
xgb.config(b2) <- xgb.config(bst) xgb.config(b2) <- xgb.config(bst)
expect_equal(bst$raw, b2$raw) expect_equal(xgb.save.raw(bst), xgb.save.raw(b2))
for (f in files) if (file.exists(f)) file.remove(f) for (f in files) if (file.exists(f)) file.remove(f)
}) })
@ -208,14 +209,13 @@ test_that("early stopping xgb.train works", {
bst <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.3, bst <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.3,
early_stopping_rounds = 3, maximize = FALSE) early_stopping_rounds = 3, maximize = FALSE)
, "Stopping. Best iteration") , "Stopping. Best iteration")
expect_false(is.null(bst$best_iteration)) expect_false(is.null(xgb.attr(bst, "best_iteration")))
expect_lt(bst$best_iteration, 19) expect_lt(xgb.attr(bst, "best_iteration"), 19)
expect_equal(bst$best_iteration, bst$best_ntreelimit)
pred <- predict(bst, dtest) pred <- predict(bst, dtest)
expect_equal(length(pred), 1611) expect_equal(length(pred), 1611)
err_pred <- err(ltest, pred) err_pred <- err(ltest, pred)
err_log <- bst$evaluation_log[bst$best_iteration, test_error] err_log <- attributes(bst)$evaluation_log[xgb.attr(bst, "best_iteration") + 1, test_error]
expect_equal(err_log, err_pred, tolerance = 5e-6) expect_equal(err_log, err_pred, tolerance = 5e-6)
set.seed(11) set.seed(11)
@ -223,16 +223,14 @@ test_that("early stopping xgb.train works", {
bst0 <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.3, bst0 <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.3,
early_stopping_rounds = 3, maximize = FALSE, verbose = 0) early_stopping_rounds = 3, maximize = FALSE, verbose = 0)
) )
expect_equal(bst$evaluation_log, bst0$evaluation_log) expect_equal(attributes(bst)$evaluation_log, attributes(bst0)$evaluation_log)
xgb.save(bst, "model.bin") fname <- file.path(tempdir(), "model.bin")
loaded <- xgb.load("model.bin") xgb.save(bst, fname)
loaded <- xgb.load(fname)
expect_false(is.null(loaded$best_iteration)) expect_false(is.null(xgb.attr(loaded, "best_iteration")))
expect_equal(loaded$best_iteration, bst$best_ntreelimit) expect_equal(xgb.attr(loaded, "best_iteration"), xgb.attr(bst, "best_iteration"))
expect_equal(loaded$best_ntreelimit, bst$best_ntreelimit)
file.remove("model.bin")
}) })
test_that("early stopping using a specific metric works", { test_that("early stopping using a specific metric works", {
@ -243,14 +241,13 @@ test_that("early stopping using a specific metric works", {
callbacks = list(cb.early.stop(stopping_rounds = 3, maximize = FALSE, callbacks = list(cb.early.stop(stopping_rounds = 3, maximize = FALSE,
metric_name = 'test_logloss'))) metric_name = 'test_logloss')))
, "Stopping. Best iteration") , "Stopping. Best iteration")
expect_false(is.null(bst$best_iteration)) expect_false(is.null(xgb.attr(bst, "best_iteration")))
expect_lt(bst$best_iteration, 19) expect_lt(xgb.attr(bst, "best_iteration"), 19)
expect_equal(bst$best_iteration, bst$best_ntreelimit)
pred <- predict(bst, dtest, ntreelimit = bst$best_ntreelimit) pred <- predict(bst, dtest, iterationrange = c(1, xgb.attr(bst, "best_iteration") + 1))
expect_equal(length(pred), 1611) expect_equal(length(pred), 1611)
logloss_pred <- sum(-ltest * log(pred) - (1 - ltest) * log(1 - pred)) / length(ltest) logloss_pred <- sum(-ltest * log(pred) - (1 - ltest) * log(1 - pred)) / length(ltest)
logloss_log <- bst$evaluation_log[bst$best_iteration, test_logloss] logloss_log <- attributes(bst)$evaluation_log[xgb.attr(bst, "best_iteration") + 1, test_logloss]
expect_equal(logloss_log, logloss_pred, tolerance = 1e-5) expect_equal(logloss_log, logloss_pred, tolerance = 1e-5)
}) })
@ -265,14 +262,14 @@ test_that("early stopping works with titanic", {
dtx <- model.matrix(~ 0 + ., data = titanic[, c("Pclass", "Sex")]) dtx <- model.matrix(~ 0 + ., data = titanic[, c("Pclass", "Sex")])
dty <- titanic$Survived dty <- titanic$Survived
xgboost::xgboost( xgboost::xgb.train(
data = dtx, data = xgb.DMatrix(dtx, label = dty),
label = dty,
objective = "binary:logistic", objective = "binary:logistic",
eval_metric = "auc", eval_metric = "auc",
nrounds = 100, nrounds = 100,
early_stopping_rounds = 3, early_stopping_rounds = 3,
nthread = n_threads nthread = n_threads,
watchlist = list(train = xgb.DMatrix(dtx, label = dty))
) )
expect_true(TRUE) # should not crash expect_true(TRUE) # should not crash
@ -286,7 +283,6 @@ test_that("early stopping xgb.cv works", {
, "Stopping. Best iteration") , "Stopping. Best iteration")
expect_false(is.null(cv$best_iteration)) expect_false(is.null(cv$best_iteration))
expect_lt(cv$best_iteration, 19) expect_lt(cv$best_iteration, 19)
expect_equal(cv$best_iteration, cv$best_ntreelimit)
# the best error is min error: # the best error is min error:
expect_true(cv$evaluation_log[, test_error_mean[cv$best_iteration] == min(test_error_mean)]) expect_true(cv$evaluation_log[, test_error_mean[cv$best_iteration] == min(test_error_mean)])
}) })
@ -354,3 +350,44 @@ test_that("prediction in xgb.cv for softprob works", {
expect_equal(dim(cv$pred), c(nrow(iris), 3)) expect_equal(dim(cv$pred), c(nrow(iris), 3))
expect_lt(diff(range(rowSums(cv$pred))), 1e-6) expect_lt(diff(range(rowSums(cv$pred))), 1e-6)
}) })
test_that("prediction in xgb.cv works for multi-quantile", {
data(mtcars)
y <- mtcars$mpg
x <- as.matrix(mtcars[, -1])
dm <- xgb.DMatrix(x, label = y, nthread = 1)
cv <- xgb.cv(
data = dm,
params = list(
objective = "reg:quantileerror",
quantile_alpha = c(0.1, 0.2, 0.5, 0.8, 0.9),
nthread = 1
),
nrounds = 5,
nfold = 3,
prediction = TRUE,
verbose = 0
)
expect_equal(dim(cv$pred), c(nrow(x), 5))
})
test_that("prediction in xgb.cv works for multi-output", {
data(mtcars)
y <- mtcars$mpg
x <- as.matrix(mtcars[, -1])
dm <- xgb.DMatrix(x, label = cbind(y, -y), nthread = 1)
cv <- xgb.cv(
data = dm,
params = list(
tree_method = "hist",
multi_strategy = "multi_output_tree",
objective = "reg:squarederror",
nthread = n_threads
),
nrounds = 5,
nfold = 3,
prediction = TRUE,
verbose = 0
)
expect_equal(dim(cv$pred), c(nrow(x), 2))
})

View File

@ -35,9 +35,9 @@ num_round <- 2
test_that("custom objective works", { test_that("custom objective works", {
bst <- xgb.train(param, dtrain, num_round, watchlist) bst <- xgb.train(param, dtrain, num_round, watchlist)
expect_equal(class(bst), "xgb.Booster") expect_equal(class(bst), "xgb.Booster")
expect_false(is.null(bst$evaluation_log)) expect_false(is.null(attributes(bst)$evaluation_log))
expect_false(is.null(bst$evaluation_log$eval_error)) expect_false(is.null(attributes(bst)$evaluation_log$eval_error))
expect_lt(bst$evaluation_log[num_round, eval_error], 0.03) expect_lt(attributes(bst)$evaluation_log[num_round, eval_error], 0.03)
}) })
test_that("custom objective in CV works", { test_that("custom objective in CV works", {
@ -50,7 +50,7 @@ test_that("custom objective in CV works", {
test_that("custom objective with early stop works", { test_that("custom objective with early stop works", {
bst <- xgb.train(param, dtrain, 10, watchlist) bst <- xgb.train(param, dtrain, 10, watchlist)
expect_equal(class(bst), "xgb.Booster") expect_equal(class(bst), "xgb.Booster")
train_log <- bst$evaluation_log$train_error train_log <- attributes(bst)$evaluation_log$train_error
expect_true(all(diff(train_log) <= 0)) expect_true(all(diff(train_log) <= 0))
}) })

Some files were not shown because too many files have changed in this diff Show More