Compare commits
111 commits: dependabot ... v2.0.0rc1
Commit SHA1s:

4301558a57, 68be454cfa, 5188e27513, f380c10a93, 12fe2fc06c, b2e93d2742, c061e3ae50, b82e78c169,
8463107013, 19b59938b7, e3f624d8e7, 2c84daeca7, 344f90b67b, 05d7000096, f03463c45b, fd4335d0bf,
801116c307, bb56183396, bdc1a3c178, 428f6cbbe2, d638535581, 44bd2981b2, 9dbb71490c, 4359356d46,
1caa93221a, a57371ef7c, f05a23b41c, d495a180d8, 7f854848d3, f05294a6f2, 819098a48f, c1b2cff874,
7ce090e775, 97fd5207dd, 54029a59af, 5bd163aa25, 7fc57f3974, bde1ebc209, 1aabc690ec, 04c99683c3,
1332ff787f, f958e32683, 7129988847, e93a274823, c2b85ab68a, a9da2e244a, 912e341d57, 8f0efb4ab3,
7579905e18, 54579da4d7, 3a9996173e, 1b657a5513, a196443a07, 851cba931e, 01e00efc53, 275da176ba,
22b0a55a04, 0de7c47495, dbd5309b55, f7f673b00c, 7a0ccfbb49, 0897477af0, e082718c66, 6e18d3a290,
2a0ff209ff, f4fb2be101, 2caceb157d, b342ef951b, 0a07900b9f, 16eb41936d, 9da5050643, 04aff3af8e,
2d0cd2817e, a1367ea1f8, 3632242e0b, 97ed944209, 20c52f07d2, c3124813e8, 59787b23af, 15ca12a77e,
41c6813496, b572a39919, 645037e376, 6c9c8a9001, bb2de1fd5d, d0916849a6, 6155394a06, e964654b8f,
39390cc2ee, 3a0f787703, f90771eec6, f4798718c7, bc267dd729, 96c3071a8a, cfa9c42eb4, 6efe7c129f,
54da4b3185, 4066d68261, 6d22ea793c, ee6809e642, d8beb517ed, 2718ff530c, 0df1272695, e70810be8a,
c2f0486d37, aad1313154, 2b76061659, 152e2fb072, ea0deeca68, 8c1065f645, 1fcc26a6f8
41  .github/workflows/python_tests.yml (vendored)

@@ -255,3 +255,44 @@ jobs:
        shell: bash -l {0}
        run: |
          pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_spark

  python-system-installation-on-ubuntu:
    name: Test XGBoost Python package System Installation on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]

    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'

      - name: Set up Python 3.8
        uses: actions/setup-python@v4
        with:
          python-version: 3.8

      - name: Install ninja
        run: |
          sudo apt-get update && sudo apt-get install -y ninja-build

      - name: Build XGBoost on Ubuntu
        run: |
          mkdir build
          cd build
          cmake .. -GNinja
          ninja

      - name: Copy lib to system lib
        run: |
          cp lib/* "$(python -c 'import sys; print(sys.base_prefix)')/lib"

      - name: Install XGBoost in Virtual Environment
        run: |
          cd python-package
          pip install virtualenv
          virtualenv venv
          source venv/bin/activate && \
          pip install -v . --config-settings use_system_libxgboost=True && \
          python -c 'import xgboost'
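The new `python-system-installation-on-ubuntu` job above builds the native library with CMake and Ninja, copies it into the interpreter's base prefix, and then installs the Python package against that pre-built library. A minimal local sketch of the same flow, assuming a Linux checkout of the repository with CMake, Ninja and pip available (the `use_system_libxgboost` config setting is taken from the workflow above; exact paths may differ on your system):

```bash
# Build the native library once with CMake + Ninja
mkdir -p build && cd build
cmake .. -GNinja
ninja
cd ..

# Make the freshly built lib visible to the interpreter (system-wide location assumed)
cp lib/* "$(python -c 'import sys; print(sys.base_prefix)')/lib"

# Install the Python package against the existing libxgboost instead of rebuilding it
cd python-package
pip install -v . --config-settings use_system_libxgboost=True
python -c 'import xgboost'
```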
6  .github/workflows/scorecards.yml (vendored)

@@ -27,7 +27,7 @@ jobs:
          persist-credentials: false

      - name: "Run analysis"
        uses: ossf/scorecard-action@99c53751e09b9529366343771cc321ec74e9bd3d # tag=v2.0.6
        uses: ossf/scorecard-action@08b4669551908b1024bb425080c797723083c031 # tag=v2.2.0
        with:
          results_file: results.sarif
          results_format: sarif
@@ -41,7 +41,7 @@ jobs:
      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
      # format to the repository Actions tab.
      - name: "Upload artifact"
        uses: actions/upload-artifact@6673cd052c4cd6fcf4b4e6e60ea986c889389535 # tag=v3.0.0
        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # tag=v3.1.2
        with:
          name: SARIF file
          path: results.sarif
@@ -49,6 +49,6 @@ jobs:

      # Upload the results to GitHub's code scanning dashboard.
      - name: "Upload to code-scanning"
        uses: github/codeql-action/upload-sarif@5f532563584d71fdef14ee64d17bafb34f751ce5 # tag=v1.0.26
        uses: github/codeql-action/upload-sarif@7b6664fa89524ee6e3c3e9749402d5afd69b3cd8 # tag=v2.14.1
        with:
          sarif_file: results.sarif
1  .gitignore (vendored)

@@ -48,6 +48,7 @@ Debug
*.Rproj
./xgboost.mpi
./xgboost.mock
*.bak
#.Rbuildignore
R-package.Rproj
*.cache*
1  CITATION

@@ -15,4 +15,3 @@
  address = {New York, NY, USA},
  keywords = {large-scale machine learning},
}
@@ -14,8 +14,24 @@ endif ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUA

message(STATUS "CMake version ${CMAKE_VERSION}")

if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
  message(FATAL_ERROR "GCC version must be at least 5.0!")
# Check compiler versions
# Use recent compilers to ensure that std::filesystem is available
if(MSVC)
  if(MSVC_VERSION LESS 1920)
    message(FATAL_ERROR "Need Visual Studio 2019 or newer to build XGBoost")
  endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
  if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "8.1")
    message(FATAL_ERROR "Need GCC 8.1 or newer to build XGBoost")
  endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
  if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "11.0")
    message(FATAL_ERROR "Need Xcode 11.0 (AppleClang 11.0) or newer to build XGBoost")
  endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
  if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.0")
    message(FATAL_ERROR "Need Clang 9.0 or newer to build XGBoost")
  endif()
endif()

include(${xgboost_SOURCE_DIR}/cmake/FindPrefetchIntrinsics.cmake)
@@ -50,6 +66,7 @@ option(HIDE_CXX_SYMBOLS "Build shared library and hide all C++ symbols" OFF)
option(KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR "Output build artifacts in CMake binary dir" OFF)
## CUDA
option(USE_CUDA "Build with GPU acceleration" OFF)
option(USE_PER_THREAD_DEFAULT_STREAM "Build with per-thread default stream" ON)
option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF)
set(GPU_COMPUTE_VER "" CACHE STRING
@@ -231,6 +248,15 @@ add_subdirectory(${xgboost_SOURCE_DIR}/plugin)

if (PLUGIN_RMM)
  find_package(rmm REQUIRED)

  # Patch the rmm targets so they reference the static cudart
  # Remove this patch once RMM stops specifying cudart requirement
  # (since RMM is a header-only library, it should not specify cudart in its CMake config)
  get_target_property(rmm_link_libs rmm::rmm INTERFACE_LINK_LIBRARIES)
  list(REMOVE_ITEM rmm_link_libs CUDA::cudart)
  list(APPEND rmm_link_libs CUDA::cudart_static)
  set_target_properties(rmm::rmm PROPERTIES INTERFACE_LINK_LIBRARIES "${rmm_link_libs}")
  get_target_property(rmm_link_libs rmm::rmm INTERFACE_LINK_LIBRARIES)
endif (PLUGIN_RMM)

#-- library
17  NEWS.md

@@ -3,6 +3,23 @@ XGBoost Change Log

This file records the changes in xgboost library in reverse chronological order.

## 1.7.6 (2023 Jun 16)

This is a patch release for bug fixes. The CRAN package for the R binding is kept at 1.7.5.

### Bug Fixes
* Fix distributed training with mixed dense and sparse partitions. (#9272)
* Fix monotone constraints on CPU with large trees. (#9122)
* [spark] Make the spark model have the same UID as its estimator (#9022)
* Optimize prediction with `QuantileDMatrix`. (#9096)

### Document
* Improve doxygen (#8959)
* Update the cuDF pip index URL. (#9106)

### Maintenance
* Fix tests with pandas 2.0. (#9014)

## 1.7.5 (2023 Mar 30)
This is a patch release for bug fixes.
@@ -2,7 +2,7 @@ Package: xgboost
Type: Package
Title: Extreme Gradient Boosting
Version: 2.0.0.1
Date: 2022-10-18
Date: 2023-08-16
Authors@R: c(
  person("Tianqi", "Chen", role = c("aut"),
         email = "tianqi.tchen@gmail.com"),
@@ -511,7 +511,7 @@ cb.cv.predict <- function(save_models = FALSE) {
  if (save_models) {
    env$basket$models <- lapply(env$bst_folds, function(fd) {
      xgb.attr(fd$bst, 'niter') <- env$end_iteration - 1
      xgb.Booster.complete(xgb.handleToBooster(fd$bst), saveraw = TRUE)
      xgb.Booster.complete(xgb.handleToBooster(handle = fd$bst, raw = NULL), saveraw = TRUE)
    })
  }
}
@@ -659,7 +659,7 @@ cb.gblinear.history <- function(sparse = FALSE) {
  } else { # xgb.cv:
    cf <- vector("list", length(env$bst_folds))
    for (i in seq_along(env$bst_folds)) {
      dmp <- xgb.dump(xgb.handleToBooster(env$bst_folds[[i]]$bst))
      dmp <- xgb.dump(xgb.handleToBooster(handle = env$bst_folds[[i]]$bst, raw = NULL))
      cf[[i]] <- as.numeric(grep('(booster|bias|weigh)', dmp, invert = TRUE, value = TRUE))
      if (sparse) cf[[i]] <- as(cf[[i]], "sparseVector")
    }
@@ -140,7 +140,7 @@ check.custom.eval <- function(env = parent.frame()) {

# Update a booster handle for an iteration with dtrain data
xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
xgb.iter.update <- function(booster_handle, dtrain, iter, obj) {
  if (!identical(class(booster_handle), "xgb.Booster.handle")) {
    stop("booster_handle must be of xgb.Booster.handle class")
  }
@@ -163,7 +163,7 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
# Evaluate one iteration.
# Returns a named vector of evaluation metrics
# with the names in a 'datasetname-metricname' format.
xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
xgb.iter.eval <- function(booster_handle, watchlist, iter, feval) {
  if (!identical(class(booster_handle), "xgb.Booster.handle"))
    stop("class of booster_handle must be xgb.Booster.handle")

@@ -234,7 +234,7 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
        y <- factor(y)
      }
    }
    folds <- xgb.createFolds(y, nfold)
    folds <- xgb.createFolds(y = y, k = nfold)
  } else {
    # make simple non-stratified folds
    kstep <- length(rnd_idx) %/% nfold
@@ -251,7 +251,7 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
# Creates CV folds stratified by the values of y.
# It was borrowed from caret::createFolds and simplified
# by always returning an unnamed list of fold indices.
xgb.createFolds <- function(y, k = 10) {
xgb.createFolds <- function(y, k) {
  if (is.numeric(y)) {
    ## Group the numeric data based on their magnitudes
    ## and sample within those groups.
@@ -1,7 +1,6 @@
# Construct an internal xgboost Booster and return a handle to it.
# internal utility function
xgb.Booster.handle <- function(params = list(), cachelist = list(),
                               modelfile = NULL, handle = NULL) {
xgb.Booster.handle <- function(params, cachelist, modelfile, handle) {
  if (typeof(cachelist) != "list" ||
      !all(vapply(cachelist, inherits, logical(1), what = 'xgb.DMatrix'))) {
    stop("cachelist must be a list of xgb.DMatrix objects")
@@ -12,7 +11,7 @@ xgb.Booster.handle <- function(params = list(), cachelist = list(),
    ## A filename
    handle <- .Call(XGBoosterCreate_R, cachelist)
    modelfile <- path.expand(modelfile)
    .Call(XGBoosterLoadModel_R, handle, modelfile[1])
    .Call(XGBoosterLoadModel_R, handle, enc2utf8(modelfile[1]))
    class(handle) <- "xgb.Booster.handle"
    if (length(params) > 0) {
      xgb.parameters(handle) <- params
@@ -44,7 +43,7 @@ xgb.Booster.handle <- function(params = list(), cachelist = list(),

# Convert xgb.Booster.handle to xgb.Booster
# internal utility function
xgb.handleToBooster <- function(handle, raw = NULL) {
xgb.handleToBooster <- function(handle, raw) {
  bst <- list(handle = handle, raw = raw)
  class(bst) <- "xgb.Booster"
  return(bst)
@@ -129,7 +128,12 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
    stop("argument type must be xgb.Booster")

  if (is.null.handle(object$handle)) {
    object$handle <- xgb.Booster.handle(modelfile = object$raw, handle = object$handle)
    object$handle <- xgb.Booster.handle(
      params = list(),
      cachelist = list(),
      modelfile = object$raw,
      handle = object$handle
    )
  } else {
    if (is.null(object$raw) && saveraw) {
      object$raw <- xgb.serialize(object$handle)
@@ -475,7 +479,7 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
#' @export
predict.xgb.Booster.handle <- function(object, ...) {

  bst <- xgb.handleToBooster(object)
  bst <- xgb.handleToBooster(handle = object, raw = NULL)

  ret <- predict(bst, ...)
  return(ret)
@@ -88,7 +88,7 @@ xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, nthre

# get dmatrix from data, label
# internal helper method
xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL, nthread = NULL) {
xgb.get.DMatrix <- function(data, label, missing, weight, nthread) {
  if (inherits(data, "dgCMatrix") || is.matrix(data)) {
    if (is.null(label)) {
      stop("label must be provided when data is a matrix")
@@ -135,9 +135,6 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
  check.custom.obj()
  check.custom.eval()

  #if (is.null(params[['eval_metric']]) && is.null(feval))
  #  stop("Either 'eval_metric' or 'feval' must be provided for CV")

  # Check the labels
  if ((inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
      (!inherits(data, 'xgb.DMatrix') && is.null(label))) {
@@ -161,10 +158,6 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
    folds <- generate.cv.folds(nfold, nrow(data), stratified, cv_label, params)
  }

  # Potential TODO: sequential CV
  #if (strategy == 'sequential')
  #  stop('Sequential CV strategy is not yet implemented')

  # verbosity & evaluation printing callback:
  params <- c(params, list(silent = 1))
  print_every_n <- max(as.integer(print_every_n), 1L)
@@ -194,7 +187,13 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing

  # create the booster-folds
  # train_folds
  dall <- xgb.get.DMatrix(data, label, missing, nthread = params$nthread)
  dall <- xgb.get.DMatrix(
    data = data,
    label = label,
    missing = missing,
    weight = NULL,
    nthread = params$nthread
  )
  bst_folds <- lapply(seq_along(folds), function(k) {
    dtest <- slice(dall, folds[[k]])
    # code originally contributed by @RolandASc on stackoverflow
@@ -202,7 +201,12 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
      dtrain <- slice(dall, unlist(folds[-k]))
    else
      dtrain <- slice(dall, train_folds[[k]])
    handle <- xgb.Booster.handle(params, list(dtrain, dtest))
    handle <- xgb.Booster.handle(
      params = params,
      cachelist = list(dtrain, dtest),
      modelfile = NULL,
      handle = NULL
    )
    list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test = dtest), index = folds[[k]])
  })
  rm(dall)
@@ -223,8 +227,18 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
    for (f in cb$pre_iter) f()

    msg <- lapply(bst_folds, function(fd) {
      xgb.iter.update(fd$bst, fd$dtrain, iteration - 1, obj)
      xgb.iter.eval(fd$bst, fd$watchlist, iteration - 1, feval)
      xgb.iter.update(
        booster_handle = fd$bst,
        dtrain = fd$dtrain,
        iter = iteration - 1,
        obj = obj
      )
      xgb.iter.eval(
        booster_handle = fd$bst,
        watchlist = fd$watchlist,
        iter = iteration - 1,
        feval = feval
      )
    })
    msg <- simplify2array(msg)
    bst_evaluation <- rowMeans(msg)
@@ -142,6 +142,7 @@ xgb.ggplot.shap.summary <- function(data, shap_contrib = NULL, features = NULL,
#'
#' @return A data.table containing the observation ID, the feature name, the
#' feature value (normalized if specified), and the SHAP contribution value.
#' @noRd
prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
  data <- data_list[["data"]]
  shap_contrib <- data_list[["shap_contrib"]]
@@ -170,6 +171,7 @@ prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
#' @param x Numeric vector
#'
#' @return Numeric vector with mean 0 and sd 1.
#' @noRd
normalize <- function(x) {
  loc <- mean(x, na.rm = TRUE)
  scale <- stats::sd(x, na.rm = TRUE)
@@ -181,7 +183,7 @@ normalize <- function(x) {
# ... the plots
# cols number of columns
# internal utility function
multiplot <- function(..., cols = 1) {
multiplot <- function(..., cols) {
  plots <- list(...)
  num_plots <- length(plots)
@@ -35,7 +35,12 @@ xgb.load <- function(modelfile) {
  if (is.null(modelfile))
    stop("xgb.load: modelfile cannot be NULL")

  handle <- xgb.Booster.handle(modelfile = modelfile)
  handle <- xgb.Booster.handle(
    params = list(),
    cachelist = list(),
    modelfile = modelfile,
    handle = NULL
  )
  # re-use modelfile if it is raw so we do not need to serialize
  if (typeof(modelfile) == "raw") {
    warning(
@@ -45,9 +50,9 @@ xgb.load <- function(modelfile) {
        " `xgb.unserialize` instead. "
      )
    )
    bst <- xgb.handleToBooster(handle, modelfile)
    bst <- xgb.handleToBooster(handle = handle, raw = modelfile)
  } else {
    bst <- xgb.handleToBooster(handle, NULL)
    bst <- xgb.handleToBooster(handle = handle, raw = NULL)
  }
  bst <- xgb.Booster.complete(bst, saveraw = TRUE)
  return(bst)
@@ -86,8 +86,7 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
    text <- xgb.dump(model = model, with_stats = TRUE)
  }

  if (length(text) < 2 ||
      sum(grepl('leaf=(\\d+)', text)) < 1) {
  if (length(text) < 2 || !any(grepl('leaf=(\\d+)', text))) {
    stop("Non-tree model detected! This function can only be used with tree models.")
  }
@@ -136,7 +136,7 @@ get.leaf.depth <- function(dt_tree) {
    # list of paths to each leaf in a tree
    paths <- lapply(paths_tmp$vpath, names)
    # combine into a resulting path lengths table for a tree
    data.table(Depth = sapply(paths, length), ID = To[Leaf == TRUE])
    data.table(Depth = lengths(paths), ID = To[Leaf == TRUE])
  }, by = Tree]
}
@@ -193,7 +193,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
#' hence allows us to see which features have a negative / positive contribution
#' on the model prediction, and whether the contribution is different for larger
#' or smaller values of the feature. We effectively try to replicate the
#' \code{summary_plot} function from https://github.com/slundberg/shap.
#' \code{summary_plot} function from https://github.com/shap/shap.
#'
#' @inheritParams xgb.plot.shap
#'
@@ -202,7 +202,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
#'
#' @examples # See \code{\link{xgb.plot.shap}}.
#' @seealso \code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}},
#' \url{https://github.com/slundberg/shap}
#' \url{https://github.com/shap/shap}
xgb.plot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
                                  trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
  # Only ggplot implementation is available.
@@ -43,6 +43,6 @@ xgb.save <- function(model, fname) {
  }
  model <- xgb.Booster.complete(model, saveraw = FALSE)
  fname <- path.expand(fname)
  .Call(XGBoosterSaveModel_R, model$handle, fname[1])
  .Call(XGBoosterSaveModel_R, model$handle, enc2utf8(fname[1]))
  return(TRUE)
}
@@ -363,8 +363,13 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
  is_update <- NVL(params[['process_type']], '.') == 'update'

  # Construct a booster (either a new one or load from xgb_model)
  handle <- xgb.Booster.handle(params, append(watchlist, dtrain), xgb_model)
  bst <- xgb.handleToBooster(handle)
  handle <- xgb.Booster.handle(
    params = params,
    cachelist = append(watchlist, dtrain),
    modelfile = xgb_model,
    handle = NULL
  )
  bst <- xgb.handleToBooster(handle = handle, raw = NULL)

  # extract parameters that can affect the relationship b/w #trees and #iterations
  num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1)
@@ -390,10 +395,21 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),

    for (f in cb$pre_iter) f()

    xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)
    xgb.iter.update(
      booster_handle = bst$handle,
      dtrain = dtrain,
      iter = iteration - 1,
      obj = obj
    )

    if (length(watchlist) > 0)
      bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval) # nolint: object_usage_linter
    if (length(watchlist) > 0) {
      bst_evaluation <- xgb.iter.eval( # nolint: object_usage_linter
        booster_handle = bst$handle,
        watchlist = watchlist,
        iter = iteration - 1,
        feval = feval
      )
    }

    xgb.attr(bst$handle, 'niter') <- iteration - 1
@@ -10,7 +10,13 @@ xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
                    save_period = NULL, save_name = "xgboost.model",
                    xgb_model = NULL, callbacks = list(), ...) {
  merged <- check.booster.params(params, ...)
  dtrain <- xgb.get.DMatrix(data, label, missing, weight, nthread = merged$nthread)
  dtrain <- xgb.get.DMatrix(
    data = data,
    label = label,
    missing = missing,
    weight = weight,
    nthread = merged$nthread
  )

  watchlist <- list(train = dtrain)
@@ -44,7 +44,7 @@ treeInteractions <- function(input_tree, input_max_depth) {

  # Remove non-interactions (same variable)
  interaction_list <- lapply(interaction_list, unique) # remove same variables
  interaction_length <- sapply(interaction_list, length)
  interaction_length <- lengths(interaction_list)
  interaction_list <- interaction_list[interaction_length > 1]
  interaction_list <- unique(lapply(interaction_list, sort))
  return(interaction_list)
@@ -1,18 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R
\name{normalize}
\alias{normalize}
\title{Scale feature value to have mean 0, standard deviation 1}
\usage{
normalize(x)
}
\arguments{
\item{x}{Numeric vector}
}
\value{
Numeric vector with mean 0 and sd 1.
}
\description{
This is used to compare multiple features on the same plot.
Internal utility function
}
@@ -1,27 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R
\name{prepare.ggplot.shap.data}
\alias{prepare.ggplot.shap.data}
\title{Combine and melt feature values and SHAP contributions for sample
observations.}
\usage{
prepare.ggplot.shap.data(data_list, normalize = FALSE)
}
\arguments{
\item{data_list}{List containing 'data' and 'shap_contrib' returned by
\code{xgb.shap.data()}.}

\item{normalize}{Whether to standardize feature values to have mean 0 and
standard deviation 1 (useful for comparing multiple features on the same
plot). Default \code{FALSE}.}
}
\value{
A data.table containing the observation ID, the feature name, the
feature value (normalized if specified), and the SHAP contribution value.
}
\description{
Conforms to data format required for ggplot functions.
}
\details{
Internal utility function.
}
@@ -67,12 +67,12 @@ Each point (observation) is coloured based on its feature value. The plot
hence allows us to see which features have a negative / positive contribution
on the model prediction, and whether the contribution is different for larger
or smaller values of the feature. We effectively try to replicate the
\code{summary_plot} function from https://github.com/slundberg/shap.
\code{summary_plot} function from https://github.com/shap/shap.
}
\examples{
# See \code{\link{xgb.plot.shap}}.
}
\seealso{
\code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}},
\url{https://github.com/slundberg/shap}
\url{https://github.com/shap/shap}
}
@@ -47,6 +47,7 @@ OBJECTS= \
  $(PKGROOT)/src/data/data.o \
  $(PKGROOT)/src/data/sparse_page_raw_format.o \
  $(PKGROOT)/src/data/ellpack_page.o \
  $(PKGROOT)/src/data/file_iterator.o \
  $(PKGROOT)/src/data/gradient_index.o \
  $(PKGROOT)/src/data/gradient_index_page_source.o \
  $(PKGROOT)/src/data/gradient_index_format.o \
@@ -68,6 +69,8 @@ OBJECTS= \
  $(PKGROOT)/src/tree/updater_quantile_hist.o \
  $(PKGROOT)/src/tree/updater_refresh.o \
  $(PKGROOT)/src/tree/updater_sync.o \
  $(PKGROOT)/src/tree/hist/param.o \
  $(PKGROOT)/src/tree/hist/histogram.o \
  $(PKGROOT)/src/linear/linear_updater.o \
  $(PKGROOT)/src/linear/updater_coordinate.o \
  $(PKGROOT)/src/linear/updater_shotgun.o \
@@ -82,6 +85,7 @@ OBJECTS= \
  $(PKGROOT)/src/common/charconv.o \
  $(PKGROOT)/src/common/column_matrix.o \
  $(PKGROOT)/src/common/common.o \
  $(PKGROOT)/src/common/error_msg.o \
  $(PKGROOT)/src/common/hist_util.o \
  $(PKGROOT)/src/common/host_device_vector.o \
  $(PKGROOT)/src/common/io.o \
@@ -47,6 +47,7 @@ OBJECTS= \
  $(PKGROOT)/src/data/data.o \
  $(PKGROOT)/src/data/sparse_page_raw_format.o \
  $(PKGROOT)/src/data/ellpack_page.o \
  $(PKGROOT)/src/data/file_iterator.o \
  $(PKGROOT)/src/data/gradient_index.o \
  $(PKGROOT)/src/data/gradient_index_page_source.o \
  $(PKGROOT)/src/data/gradient_index_format.o \
@@ -68,6 +69,8 @@ OBJECTS= \
  $(PKGROOT)/src/tree/updater_quantile_hist.o \
  $(PKGROOT)/src/tree/updater_refresh.o \
  $(PKGROOT)/src/tree/updater_sync.o \
  $(PKGROOT)/src/tree/hist/param.o \
  $(PKGROOT)/src/tree/hist/histogram.o \
  $(PKGROOT)/src/linear/linear_updater.o \
  $(PKGROOT)/src/linear/updater_coordinate.o \
  $(PKGROOT)/src/linear/updater_shotgun.o \
@@ -82,6 +85,7 @@ OBJECTS= \
  $(PKGROOT)/src/common/charconv.o \
  $(PKGROOT)/src/common/column_matrix.o \
  $(PKGROOT)/src/common/common.o \
  $(PKGROOT)/src/common/error_msg.o \
  $(PKGROOT)/src/common/hist_util.o \
  $(PKGROOT)/src/common/host_device_vector.o \
  $(PKGROOT)/src/common/io.o \
@@ -85,9 +85,18 @@ test_that("dart prediction works", {
    rnorm(100)

  set.seed(1994)
  booster_by_xgboost <- xgboost(data = d, label = y, max_depth = 2, booster = "dart",
                                rate_drop = 0.5, one_drop = TRUE,
                                eta = 1, nthread = 2, nrounds = nrounds, objective = "reg:squarederror")
  booster_by_xgboost <- xgboost(
    data = d,
    label = y,
    max_depth = 2,
    booster = "dart",
    rate_drop = 0.5,
    one_drop = TRUE,
    eta = 1,
    nthread = 2,
    nrounds = nrounds,
    objective = "reg:squarederror"
  )
  pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0)
  pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds)
  expect_true(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_1, byrow = TRUE)))
@@ -97,14 +106,14 @@ test_that("dart prediction works", {

  set.seed(1994)
  dtrain <- xgb.DMatrix(data = d, info = list(label = y))
  booster_by_train <- xgb.train(params = list(
  booster_by_train <- xgb.train(
    params = list(
      booster = "dart",
      max_depth = 2,
      eta = 1,
      rate_drop = 0.5,
      one_drop = TRUE,
      nthread = 1,
      tree_method = "exact",
      objective = "reg:squarederror"
    ),
    data = dtrain,
@@ -399,7 +408,7 @@ test_that("colsample_bytree works", {
  xgb.importance(model = bst)
  # If colsample_bytree works properly, a variety of features should be used
  # in the 100 trees
  expect_gte(nrow(xgb.importance(model = bst)), 30)
  expect_gte(nrow(xgb.importance(model = bst)), 28)
})

test_that("Configuration works", {
@@ -72,6 +72,7 @@ test_that("xgb.DMatrix: saving, loading", {
  tmp <- c("0 1:1 2:1", "1 3:1", "0 1:1")
  tmp_file <- tempfile(fileext = ".libsvm")
  writeLines(tmp, tmp_file)
  expect_true(file.exists(tmp_file))
  dtest4 <- xgb.DMatrix(paste(tmp_file, "?format=libsvm", sep = ""), silent = TRUE)
  expect_equal(dim(dtest4), c(3, 4))
  expect_equal(getinfo(dtest4, 'label'), c(0, 1, 0))
@@ -189,7 +189,7 @@ test_that("SHAPs sum to predictions, with or without DART", {
  tol <- 1e-5

  expect_equal(rowSums(shap), pred, tol = tol)
  expect_equal(apply(shapi, 1, sum), pred, tol = tol)
  expect_equal(rowSums(shapi), pred, tol = tol)
  for (i in seq_len(nrow(d)))
    for (f in list(rowSums, colSums))
      expect_equal(f(shapi[i, , ]), shap[i, ], tol = tol)
@@ -76,8 +76,6 @@ test_that("Models from previous versions of XGBoost can be loaded", {
    name <- m[3]
    is_rds <- endsWith(model_file, '.rds')
    is_json <- endsWith(model_file, '.json')

    cpp_warning <- capture.output({
    # Expect an R warning when a model is loaded from RDS and it was generated by version < 1.1.x
    if (is_rds && compareVersion(model_xgb_ver, '1.1.1.1') < 0) {
      booster <- readRDS(model_file)
@@ -94,14 +92,4 @@ test_that("Models from previous versions of XGBoost can be loaded", {
      run_booster_check(booster, name)
    }
    })
    cpp_warning <- paste0(cpp_warning, collapse = ' ')
    if (is_rds && compareVersion(model_xgb_ver, '1.1.1.1') >= 0) {
      # Expect a C++ warning when a model is loaded from RDS and it was generated by old XGBoost`
      m <- grepl(paste0('.*If you are loading a serialized model ',
                        '\\(like pickle in Python, RDS in R\\).*',
                        'for more details about differences between ',
                        'saving model and serializing.*'), cpp_warning, perl = TRUE)
      expect_true(length(m) > 0 && all(m))
    }
  })
})
21  R-package/tests/testthat/test_unicode.R (new file)

@@ -0,0 +1,21 @@
context("Test Unicode handling")

data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test
set.seed(1994)

test_that("Can save and load models with Unicode paths", {
  nrounds <- 2
  bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
                 eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic",
                 eval_metric = "error")
  tmpdir <- tempdir()
  lapply(c("모델.json", "がうる・ぐら.json", "类继承.ubj"), function(x) {
    path <- file.path(tmpdir, x)
    xgb.save(bst, path)
    bst2 <- xgb.load(path)
    expect_equal(predict(bst, test$data), predict(bst2, test$data))
  })
})
@@ -13,7 +13,10 @@ test_that("updating the model works", {
  watchlist <- list(train = dtrain, test = dtest)

  # no-subsampling
  p1 <- list(objective = "binary:logistic", max_depth = 2, eta = 0.05, nthread = 2)
  p1 <- list(
    objective = "binary:logistic", max_depth = 2, eta = 0.05, nthread = 2,
    updater = "grow_colmaker,prune"
  )
  set.seed(11)
  bst1 <- xgb.train(p1, dtrain, nrounds = 10, watchlist, verbose = 0)
  tr1 <- xgb.model.dt.tree(model = bst1)
@@ -51,24 +51,24 @@ A *categorical* variable has a fixed number of different values. For instance, i
>
> Type `?factor` in the console for more information.

To answer the question above we will convert *categorical* variables to `numeric` one.
To answer the question above we will convert *categorical* variables to `numeric` ones.

### Conversion from categorical to numeric variables

#### Looking at the raw data

In this Vignette we will see how to transform a *dense* `data.frame` (*dense* = few zeroes in the matrix) with *categorical* variables to a very *sparse* matrix (*sparse* = lots of zero in the matrix) of `numeric` features.
In this Vignette we will see how to transform a *dense* `data.frame` (*dense* = the majority of the matrix is non-zero) with *categorical* variables to a very *sparse* matrix (*sparse* = lots of zero entries in the matrix) of `numeric` features.

The method we are going to see is usually called [one-hot encoding](https://en.wikipedia.org/wiki/One-hot).

The first step is to load `Arthritis` dataset in memory and wrap it with `data.table` package.
The first step is to load the `Arthritis` dataset in memory and wrap it with the `data.table` package.

```{r, results='hide'}
data(Arthritis)
df <- data.table(Arthritis, keep.rownames = FALSE)
```

> `data.table` is 100% compliant with **R** `data.frame` but its syntax is more consistent and its performance for large dataset is [best in class](https://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly) (`dplyr` from **R** and `Pandas` from **Python** [included](https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping)). Some parts of **XGBoost** **R** package use `data.table`.
> `data.table` is 100% compliant with **R** `data.frame` but its syntax is more consistent and its performance for large dataset is [best in class](https://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly) (`dplyr` from **R** and `Pandas` from **Python** [included](https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping)). Some parts of **XGBoost's** **R** package use `data.table`.

The first thing we want to do is to have a look to the first few lines of the `data.table`:

@@ -95,19 +95,19 @@ We will add some new *categorical* features to see if it helps.

##### Grouping per 10 years

For the first feature we create groups of age by rounding the real age.
For the first features we create groups of age by rounding the real age.

Note that we transform it to `factor` so the algorithm treat these age groups as independent values.
Note that we transform it to `factor` so the algorithm treats these age groups as independent values.

Therefore, 20 is not closer to 30 than 60. To make it short, the distance between ages is lost in this transformation.
Therefore, 20 is not closer to 30 than 60. In other words, the distance between ages is lost in this transformation.

```{r}
head(df[, AgeDiscret := as.factor(round(Age / 10, 0))])
```

##### Random split into two groups
##### Randomly split into two groups

Following is an even stronger simplification of the real age with an arbitrary split at 30 years old. We choose this value **based on nothing**. We will see later if simplifying the information based on arbitrary values is a good strategy (you may already have an idea of how well it will work...).
The following is an even stronger simplification of the real age with an arbitrary split at 30 years old. I choose this value **based on nothing**. We will see later if simplifying the information based on arbitrary values is a good strategy (you may already have an idea of how well it will work...).

```{r}
head(df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))])
@@ -119,7 +119,7 @@ These new features are highly correlated to the `Age` feature because they are s

For many machine learning algorithms, using correlated features is not a good idea. It may sometimes make prediction less accurate, and most of the time make interpretation of the model almost impossible. GLM, for instance, assumes that the features are uncorrelated.

Fortunately, decision tree algorithms (including boosted trees) are very robust to these features. Therefore we have nothing to do to manage this situation.
Fortunately, decision tree algorithms (including boosted trees) are very robust to these features. Therefore we don't have to do anything to manage this situation.

##### Cleaning data

@@ -144,7 +144,7 @@ We will use the [dummy contrast coding](https://stats.oarc.ucla.edu/r/library/r-

The purpose is to transform each value of each *categorical* feature into a *binary* feature `{0, 1}`.

For example, the column `Treatment` will be replaced by two columns, `TreatmentPlacebo`, and `TreatmentTreated`. Each of them will be *binary*. Therefore, an observation which has the value `Placebo` in column `Treatment` before the transformation will have after the transformation the value `1` in the new column `TreatmentPlacebo` and the value `0` in the new column `TreatmentTreated`. The column `TreatmentPlacebo` will disappear during the contrast encoding, as it would be absorbed into a common constant intercept column.
For example, the column `Treatment` will be replaced by two columns, `TreatmentPlacebo`, and `TreatmentTreated`. Each of them will be *binary*. Therefore, an observation which has the value `Placebo` in column `Treatment` before the transformation will have the value `1` in the new column `TreatmentPlacebo` and the value `0` in the new column `TreatmentTreated` after the transformation. The column `TreatmentPlacebo` will disappear during the contrast encoding, as it would be absorbed into a common constant intercept column.

Column `Improved` is excluded because it will be our `label` column, the one we want to predict.

@@ -176,13 +176,9 @@ bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 4,

```

You can see some `train-error: 0.XXXXX` lines followed by a number. It decreases. Each line shows how well the model explains your data. Lower is better.
You can see some `train-logloss: 0.XXXXX` lines followed by a number. It decreases. Each line shows how well the model explains the data. Lower is better.

A small value for training error may be a symptom of [overfitting](https://en.wikipedia.org/wiki/Overfitting), meaning the model will not accurately predict the future values.

> Here you can see the numbers decrease until line 7 and then increase.
>
> It probably means we are overfitting. To fix that I should reduce the number of rounds to `nrounds = 4`. I will let things like that because I don't really care for the purpose of this example :-)
A small value for training error may be a symptom of [overfitting](https://en.wikipedia.org/wiki/Overfitting), meaning the model will not accurately predict unseen values.

Feature importance
------------------
@@ -199,64 +195,35 @@ importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bs
head(importance)
```

> The column `Gain` provide the information we are looking for.
> The column `Gain` provides the information we are looking for.
>
> As you can see, features are classified by `Gain`.

`Gain` is the improvement in accuracy brought by a feature to the branches it is on. The idea is that before adding a new split on a feature X to the branch there was some wrongly classified elements, after adding the split on this feature, there are two new branches, and each of these branch is more accurate (one branch saying if your observation is on this branch then it should be classified as `1`, and the other branch saying the exact opposite).
`Gain` is the improvement in accuracy brought by a feature to the branches it is on. The idea is that before adding a new split on a feature X to the branch there were some wrongly classified elements; after adding the split on this feature, there are two new branches, and each of these branches is more accurate (one branch saying if your observation is on this branch then it should be classified as `1`, and the other branch saying the exact opposite).

`Cover` measures the relative quantity of observations concerned by a feature.
`Cover` is related to the second order derivative (or Hessian) of the loss function with respect to a particular variable; thus, a large value indicates a variable has a large potential impact on the loss function and so is important.

`Frequency` is a simpler way to measure the `Gain`. It just counts the number of times a feature is used in all generated trees. You should not use it (unless you know why you want to use it).

#### Improvement in the interpretability of feature importance data.table

We can go deeper in the analysis of the model. In the `data.table` above, we have discovered which features counts to predict if the illness will go or not. But we don't yet know the role of these features. For instance, one of the question we may want to answer would be: does receiving a placebo treatment helps to recover from the illness?

One simple solution is to count the co-occurrences of a feature and a class of the classification.

For that purpose we will execute the same function as above but using two more parameters, `data` and `label`.

```{r}
importanceRaw <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst, data = sparse_matrix, label = output_vector)

# Cleaning for better display
importanceClean <- importanceRaw[, `:=`(Cover = NULL, Frequency = NULL)]

head(importanceClean)
```

> In the table above we have removed two not needed columns and select only the first lines.

First thing you notice is the new column `Split`. It is the split applied to the feature on a branch of one of the tree. Each split is present, therefore a feature can appear several times in this table. Here we can see the feature `Age` is used several times with different splits.

How the split is applied to count the co-occurrences? It is always `<`. For instance, in the second line, we measure the number of persons under 61.5 years with the illness gone after the treatment.

The two other new columns are `RealCover` and `RealCover %`. In the first column it measures the number of observations in the dataset where the split is respected and the label marked as `1`. The second column is the percentage of the whole population that `RealCover` represents.

Therefore, according to our findings, getting a placebo doesn't seem to help but being younger than 61 years may help (seems logic).

> You may wonder how to interpret the `< 1.00001` on the first line. Basically, in a sparse `Matrix`, there is no `0`, therefore, looking for one hot-encoded categorical observations validating the rule `< 1.00001` is like just looking for `1` for this feature.

### Plotting the feature importance

All these things are nice, but it would be even better to plot the results.

```{r, fig.width=8, fig.height=5, fig.align='center'}
xgb.plot.importance(importance_matrix = importance)
```

Feature have automatically been divided in 2 clusters: the interesting features... and the others.
Running this line of code, you should get a bar chart showing the importance of the 6 features (containing the same data as the output we saw earlier, but displaying it visually for easier consumption). Note that `xgb.ggplot.importance` is also available for all the ggplot2 fans!

> Depending of the dataset and the learning parameters you may have more than two clusters. Default value is to limit them to `10`, but you can increase this limit. Look at the function documentation for more information.

According to the plot above, the most important features in this dataset to predict if the treatment will work are :

* the Age ;
* having received a placebo or not ;
* the sex is third but already included in the not interesting features group ;
* then we see our generated features (AgeDiscret). We can see that their contribution is very low.
* An individual's age;
* Having received a placebo or not;
* Gender;
* Our generated feature AgeDiscret. We can see that its contribution is very low.

### Do these results make sense?

@@ -270,53 +237,53 @@ c2 <- chisq.test(df$Age, output_vector)
print(c2)
```

Pearson correlation between Age and illness disappearing is **`r round(c2$statistic, 2 )`**.
The Pearson correlation between Age and illness disappearing is **`r round(c2$statistic, 2 )`**.

```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$AgeDiscret, output_vector)
print(c2)
```

Our first simplification of Age gives a Pearson correlation is **`r round(c2$statistic, 2)`**.
Our first simplification of Age gives a Pearson correlation of **`r round(c2$statistic, 2)`**.

```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$AgeCat, output_vector)
print(c2)
```

The perfectly random split I did between young and old at 30 years old have a low correlation of **`r round(c2$statistic, 2)`**. It's a result we may expect as may be in my mind > 30 years is being old (I am 32 and starting feeling old, this may explain that), but for the illness we are studying, the age to be vulnerable is not the same.
The perfectly random split we did between young and old at 30 years old has a low correlation of **2.36**. This suggests that, for the particular illness we are studying, the age at which someone is vulnerable to this disease is likely very different from 30.

Morality: don't let your *gut* lower the quality of your model.
Moral of the story: don't let your *gut* lower the quality of your model.

In *data science* expression, there is the word *science* :-)
In *data science*, there is the word *science* :-)

Conclusion
----------

As you can see, in general *destroying information by simplifying it won't improve your model*. **Chi2** just demonstrates that.

But in more complex cases, creating a new feature based on existing one which makes link with the outcome more obvious may help the algorithm and improve the model.
But in more complex cases, creating a new feature from an existing one may help the algorithm and improve the model.

The case studied here is not enough complex to show that. Check [Kaggle website](http://www.kaggle.com/) for some challenging datasets. However it's almost always worse when you add some arbitrary rules.
The case studied here is not complex enough to show that. Check [Kaggle website](https://www.kaggle.com/) for some challenging datasets.

Moreover, you can notice that even if we have added some not useful new features highly correlated with other features, the boosting tree algorithm have been able to choose the best one, which in this case is the Age.
Moreover, you can see that even if we have added some new features which are not very useful/highly correlated with other features, the boosting tree algorithm was still able to choose the best one (which in this case is the Age).

Linear model may not be that smart in this scenario.
Linear models may not perform as well.

Special Note: What about Random Forests™?
-----------------------------------------

As you may know, [Random Forests](https://en.wikipedia.org/wiki/Random_forest) algorithm is cousin with boosting and both are part of the [ensemble learning](https://en.wikipedia.org/wiki/Ensemble_learning) family.
As you may know, the [Random Forests](https://en.wikipedia.org/wiki/Random_forest) algorithm is cousin with boosting and both are part of the [ensemble learning](https://en.wikipedia.org/wiki/Ensemble_learning) family.

Both trains several decision trees for one dataset. The *main* difference is that in Random Forests, trees are independent and in boosting, the tree `N+1` focus its learning on the loss (<=> what has not been well modeled by the tree `N`).
Both train several decision trees for one dataset. The *main* difference is that in Random Forests, trees are independent and in boosting, the `N+1`-st tree focuses its learning on the loss (<=> what has not been well modeled by the tree `N`).

This difference have an impact on a corner case in feature importance analysis: the *correlated features*.
This difference can have an impact on a edge case in feature importance analysis: *correlated features*.

Imagine two features perfectly correlated, feature `A` and feature `B`. For one specific tree, if the algorithm needs one of them, it will choose randomly (true in both boosting and Random Forests).

However, in Random Forests this random choice will be done for each tree, because each tree is independent from the others. Therefore, approximatively, depending of your parameters, 50% of the trees will choose feature `A` and the other 50% will choose feature `B`. So the *importance* of the information contained in `A` and `B` (which is the same, because they are perfectly correlated) is diluted in `A` and `B`. So you won't easily know this information is important to predict what you want to predict! It is even worse when you have 10 correlated features...
However, in Random Forests this random choice will be done for each tree, because each tree is independent from the others. Therefore, approximately (and depending on your parameters) 50% of the trees will choose feature `A` and the other 50% will choose feature `B`. So the *importance* of the information contained in `A` and `B` (which is the same, because they are perfectly correlated) is diluted in `A` and `B`. So you won't easily know this information is important to predict what you want to predict! It is even worse when you have 10 correlated features...

In boosting, when a specific link between feature and outcome have been learned by the algorithm, it will try to not refocus on it (in theory it is what happens, reality is not always that simple). Therefore, all the importance will be on feature `A` or on feature `B` (but not both). You will know that one feature have an important role in the link between the observations and the label. It is still up to you to search for the correlated features to the one detected as important if you need to know all of them.
In boosting, when a specific link between feature and outcome have been learned by the algorithm, it will try to not refocus on it (in theory it is what happens, reality is not always that simple). Therefore, all the importance will be on feature `A` or on feature `B` (but not both). You will know that one feature has an important role in the link between the observations and the label. It is still up to you to search for the correlated features to the one detected as important if you need to know all of them.

If you want to try Random Forests algorithm, you can tweak XGBoost parameters!
@@ -18,13 +18,11 @@
  publisher={Institute of Mathematical Statistics}
}

@misc{
  Bache+Lichman:2013 ,
  author = "K. Bache and M. Lichman",
  year = "2013",
  title = "{UCI} Machine Learning Repository",
  url = "http://archive.ics.uci.edu/ml/",
  url = "https://archive.ics.uci.edu/",
  institution = "University of California, Irvine, School of Information and Computer Sciences"
}
@@ -48,7 +48,6 @@ Become a sponsor and get a logo here. See details at [Sponsoring the XGBoost Pro

<a href="https://www.nvidia.com/en-us/" target="_blank"><img src="https://raw.githubusercontent.com/xgboost-ai/xgboost-ai.github.io/master/images/sponsors/nvidia.jpg" alt="NVIDIA" width="72" height="72"></a>
<a href="https://www.intel.com/" target="_blank"><img src="https://images.opencollective.com/intel-corporation/2fa85c1/logo/256.png" width="72" height="72"></a>
<a href="https://getkoffie.com/?utm_source=opencollective&utm_medium=github&utm_campaign=xgboost" target="_blank"><img src="https://images.opencollective.com/koffielabs/f391ab8/logo/256.png" width="72" height="72"></a>

### Backers
[[Become a backer](https://opencollective.com/xgboost#backer)]
@@ -90,8 +90,8 @@ function(format_gencode_flags flags out)
  endif()
  # Set up architecture flags
  if(NOT flags)
    if (CUDA_VERSION VERSION_GREATER_EQUAL "11.1")
      set(flags "50;60;70;80")
    if (CUDA_VERSION VERSION_GREATER_EQUAL "11.8")
      set(flags "50;60;70;80;90")
    elseif (CUDA_VERSION VERSION_GREATER_EQUAL "11.0")
      set(flags "50;60;70;80")
    elseif(CUDA_VERSION VERSION_GREATER_EQUAL "10.0")
@@ -133,6 +133,11 @@ function(xgboost_set_cuda_flags target)
    $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=${OpenMP_CXX_FLAGS}>
    $<$<COMPILE_LANGUAGE:CUDA>:-Xfatbin=-compress-all>)

  if (USE_PER_THREAD_DEFAULT_STREAM)
    target_compile_options(${target} PRIVATE
      $<$<COMPILE_LANGUAGE:CUDA>:--default-stream per-thread>)
  endif (USE_PER_THREAD_DEFAULT_STREAM)

  if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.18")
    set_property(TARGET ${target} PROPERTY CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES})
  endif (CMAKE_VERSION VERSION_GREATER_EQUAL "3.18")
@@ -172,7 +177,8 @@ function(xgboost_set_cuda_flags target)
  set_target_properties(${target} PROPERTIES
    CUDA_STANDARD 17
    CUDA_STANDARD_REQUIRED ON
    CUDA_SEPARABLE_COMPILATION OFF)
    CUDA_SEPARABLE_COMPILATION OFF
    CUDA_RUNTIME_LIBRARY Static)
endfunction(xgboost_set_cuda_flags)

macro(xgboost_link_nccl target)
@@ -274,6 +280,7 @@ macro(xgboost_target_link_libraries target)

  if (USE_CUDA)
    xgboost_set_cuda_flags(${target})
    target_link_libraries(${target} PUBLIC CUDA::cudart_static)
  endif (USE_CUDA)

  if (PLUGIN_RMM)
@@ -52,11 +52,11 @@ endif (BUILD_WITH_SHARED_NCCL)
|
||||
|
||||
find_path(NCCL_INCLUDE_DIR
|
||||
NAMES nccl.h
|
||||
PATHS $ENV{NCCL_ROOT}/include ${NCCL_ROOT}/include)
|
||||
HINTS ${NCCL_ROOT}/include $ENV{NCCL_ROOT}/include)
|
||||
|
||||
find_library(NCCL_LIBRARY
|
||||
NAMES ${NCCL_LIB_NAME}
|
||||
PATHS $ENV{NCCL_ROOT}/lib/ ${NCCL_ROOT}/lib)
|
||||
HINTS ${NCCL_ROOT}/lib $ENV{NCCL_ROOT}/lib/)
|
||||
|
||||
message(STATUS "Using nccl library: ${NCCL_LIBRARY}")
|
||||
|
||||
|
||||
@@ -106,7 +106,7 @@ Please send pull requests if you find ones that are missing here.
|
||||
- Prarthana Bhat, 2nd place winner in [DYD Competition](https://datahack.analyticsvidhya.com/contest/date-your-data/). Link to [Solution](https://github.com/analyticsvidhya/DateYourData/blob/master/Prathna_Bhat_Model.R).
|
||||
|
||||
## Talks
|
||||
- [XGBoost: A Scalable Tree Boosting System](http://datascience.la/xgboost-workshop-and-meetup-talk-with-tianqi-chen/) (video+slides) by Tianqi Chen at the Los Angeles Data Science meetup
|
||||
- XGBoost: A Scalable Tree Boosting System ([video](https://www.youtube.com/watch?v=Vly8xGnNiWs) + [slides](https://speakerdeck.com/datasciencela/tianqi-chen-xgboost-overview-and-latest-news-la-meetup-talk)) by Tianqi Chen at the Los Angeles Data Science meetup
|
||||
|
||||
## Tutorials
|
||||
|
||||
|
||||
@@ -11,17 +11,27 @@ import numpy as np
|
||||
|
||||
import xgboost as xgb
|
||||
|
||||
plt.rcParams.update({'font.size': 13})
|
||||
plt.rcParams.update({"font.size": 13})
|
||||
|
||||
|
||||
# Function to visualize censored labels
|
||||
def plot_censored_labels(X, y_lower, y_upper):
|
||||
def replace_inf(x, target_value):
|
||||
def plot_censored_labels(
|
||||
X: np.ndarray, y_lower: np.ndarray, y_upper: np.ndarray
|
||||
) -> None:
|
||||
def replace_inf(x: np.ndarray, target_value: float) -> np.ndarray:
|
||||
x[np.isinf(x)] = target_value
|
||||
return x
|
||||
plt.plot(X, y_lower, 'o', label='y_lower', color='blue')
|
||||
plt.plot(X, y_upper, 'o', label='y_upper', color='fuchsia')
|
||||
plt.vlines(X, ymin=replace_inf(y_lower, 0.01), ymax=replace_inf(y_upper, 1000),
|
||||
label='Range for y', color='gray')
|
||||
|
||||
plt.plot(X, y_lower, "o", label="y_lower", color="blue")
|
||||
plt.plot(X, y_upper, "o", label="y_upper", color="fuchsia")
|
||||
plt.vlines(
|
||||
X,
|
||||
ymin=replace_inf(y_lower, 0.01),
|
||||
ymax=replace_inf(y_upper, 1000.0),
|
||||
label="Range for y",
|
||||
color="gray",
|
||||
)
|
||||
|
||||
|
||||
# Toy data
|
||||
X = np.array([1, 2, 3, 4, 5]).reshape((-1, 1))
|
||||
@@ -33,11 +43,11 @@ y_upper = np.array([INF, INF, 20, 50, INF])
|
||||
plt.figure(figsize=(5, 4))
|
||||
plot_censored_labels(X, y_lower, y_upper)
|
||||
plt.ylim((6, 200))
|
||||
plt.legend(loc='lower right')
|
||||
plt.title('Toy data')
|
||||
plt.xlabel('Input feature')
|
||||
plt.ylabel('Label')
|
||||
plt.yscale('log')
|
||||
plt.legend(loc="lower right")
|
||||
plt.title("Toy data")
|
||||
plt.xlabel("Input feature")
|
||||
plt.ylabel("Label")
|
||||
plt.yscale("log")
|
||||
plt.tight_layout()
|
||||
plt.show(block=True)
|
||||
|
||||
@@ -46,54 +56,83 @@ grid_pts = np.linspace(0.8, 5.2, 1000).reshape((-1, 1))
|
||||
|
||||
# Train AFT model using XGBoost
|
||||
dmat = xgb.DMatrix(X)
|
||||
dmat.set_float_info('label_lower_bound', y_lower)
|
||||
dmat.set_float_info('label_upper_bound', y_upper)
|
||||
params = {'max_depth': 3, 'objective':'survival:aft', 'min_child_weight': 0}
|
||||
dmat.set_float_info("label_lower_bound", y_lower)
|
||||
dmat.set_float_info("label_upper_bound", y_upper)
|
||||
params = {"max_depth": 3, "objective": "survival:aft", "min_child_weight": 0}
|
||||
|
||||
accuracy_history = []
|
||||
def plot_intermediate_model_callback(env):
|
||||
"""Custom callback to plot intermediate models"""
|
||||
# Compute y_pred = prediction using the intermediate model, at current boosting iteration
|
||||
y_pred = env.model.predict(dmat)
|
||||
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes
|
||||
# the corresponding predicted label (y_pred)
|
||||
acc = np.sum(np.logical_and(y_pred >= y_lower, y_pred <= y_upper)/len(X) * 100)
|
||||
|
||||
|
||||
class PlotIntermediateModel(xgb.callback.TrainingCallback):
|
||||
"""Custom callback to plot intermediate models."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
|
||||
def after_iteration(
|
||||
self,
|
||||
model: xgb.Booster,
|
||||
epoch: int,
|
||||
evals_log: xgb.callback.TrainingCallback.EvalsLog,
|
||||
) -> bool:
|
||||
"""Run after training is finished."""
|
||||
# Compute y_pred = prediction using the intermediate model, at current boosting
|
||||
# iteration
|
||||
y_pred = model.predict(dmat)
|
||||
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper)
|
||||
# includes the corresponding predicted label (y_pred)
|
||||
acc = np.sum(
|
||||
np.logical_and(y_pred >= y_lower, y_pred <= y_upper) / len(X) * 100
|
||||
)
|
||||
accuracy_history.append(acc)
|
||||
|
||||
# Plot ranged labels as well as predictions by the model
|
||||
plt.subplot(5, 3, env.iteration + 1)
|
||||
plt.subplot(5, 3, epoch + 1)
|
||||
plot_censored_labels(X, y_lower, y_upper)
|
||||
y_pred_grid_pts = env.model.predict(xgb.DMatrix(grid_pts))
|
||||
plt.plot(grid_pts, y_pred_grid_pts, 'r-', label='XGBoost AFT model', linewidth=4)
|
||||
plt.title('Iteration {}'.format(env.iteration), x=0.5, y=0.8)
|
||||
y_pred_grid_pts = model.predict(xgb.DMatrix(grid_pts))
|
||||
plt.plot(
|
||||
grid_pts, y_pred_grid_pts, "r-", label="XGBoost AFT model", linewidth=4
|
||||
)
|
||||
plt.title("Iteration {}".format(epoch), x=0.5, y=0.8)
|
||||
plt.xlim((0.8, 5.2))
|
||||
plt.ylim((1 if np.min(y_pred) < 6 else 6, 200))
|
||||
plt.yscale('log')
|
||||
plt.yscale("log")
|
||||
return False
|
||||
|
||||
res = {}
|
||||
|
||||
res: xgb.callback.TrainingCallback.EvalsLog = {}
|
||||
plt.figure(figsize=(12, 13))
|
||||
bst = xgb.train(params, dmat, 15, [(dmat, 'train')], evals_result=res,
|
||||
callbacks=[plot_intermediate_model_callback])
|
||||
bst = xgb.train(
|
||||
params,
|
||||
dmat,
|
||||
15,
|
||||
[(dmat, "train")],
|
||||
evals_result=res,
|
||||
callbacks=[PlotIntermediateModel()],
|
||||
)
|
||||
plt.tight_layout()
|
||||
plt.legend(loc='lower center', ncol=4,
|
||||
plt.legend(
|
||||
loc="lower center",
|
||||
ncol=4,
|
||||
bbox_to_anchor=(0.5, 0),
|
||||
bbox_transform=plt.gcf().transFigure)
|
||||
bbox_transform=plt.gcf().transFigure,
|
||||
)
|
||||
plt.tight_layout()
|
||||
|
||||
# Plot negative log likelihood over boosting iterations
|
||||
plt.figure(figsize=(8, 3))
|
||||
plt.subplot(1, 2, 1)
|
||||
plt.plot(res['train']['aft-nloglik'], 'b-o', label='aft-nloglik')
|
||||
plt.xlabel('# Boosting Iterations')
|
||||
plt.legend(loc='best')
|
||||
plt.plot(res["train"]["aft-nloglik"], "b-o", label="aft-nloglik")
|
||||
plt.xlabel("# Boosting Iterations")
|
||||
plt.legend(loc="best")
|
||||
|
||||
# Plot "accuracy" over boosting iterations
|
||||
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes
|
||||
# the corresponding predicted label (y_pred)
|
||||
plt.subplot(1, 2, 2)
|
||||
plt.plot(accuracy_history, 'r-o', label='Accuracy (%)')
|
||||
plt.xlabel('# Boosting Iterations')
|
||||
plt.legend(loc='best')
|
||||
plt.plot(accuracy_history, "r-o", label="Accuracy (%)")
|
||||
plt.xlabel("# Boosting Iterations")
|
||||
plt.legend(loc="best")
|
||||
plt.tight_layout()
|
||||
|
||||
plt.show()
|
||||
|
||||
@@ -53,15 +53,7 @@ int main() {
|
||||
// configure the training
|
||||
// available parameters are described here:
|
||||
// https://xgboost.readthedocs.io/en/latest/parameter.html
|
||||
safe_xgboost(XGBoosterSetParam(booster, "tree_method", use_gpu ? "gpu_hist" : "hist"));
|
||||
if (use_gpu) {
|
||||
// set the GPU to use;
|
||||
// this is not necessary, but provided here as an illustration
|
||||
safe_xgboost(XGBoosterSetParam(booster, "gpu_id", "0"));
|
||||
} else {
|
||||
// avoid evaluating objective and metric on a GPU
|
||||
safe_xgboost(XGBoosterSetParam(booster, "gpu_id", "-1"));
|
||||
}
|
||||
safe_xgboost(XGBoosterSetParam(booster, "device", use_gpu ? "cuda" : "cpu"));
|
||||
|
||||
safe_xgboost(XGBoosterSetParam(booster, "objective", "binary:logistic"));
|
||||
safe_xgboost(XGBoosterSetParam(booster, "min_child_weight", "1"));
|
||||
|
||||
@@ -18,43 +18,45 @@ def main(client):
|
||||
# The Veterans' Administration Lung Cancer Trial
|
||||
# The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980)
|
||||
CURRENT_DIR = os.path.dirname(__file__)
|
||||
df = dd.read_csv(os.path.join(CURRENT_DIR, os.pardir, 'data', 'veterans_lung_cancer.csv'))
|
||||
df = dd.read_csv(
|
||||
os.path.join(CURRENT_DIR, os.pardir, "data", "veterans_lung_cancer.csv")
|
||||
)
|
||||
|
||||
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
|
||||
# DMatrix scatter around workers.
|
||||
# For AFT survival, you'd need to extract the lower and upper bounds for the label
|
||||
# and pass them as arguments to DaskDMatrix.
|
||||
y_lower_bound = df['Survival_label_lower_bound']
|
||||
y_upper_bound = df['Survival_label_upper_bound']
|
||||
X = df.drop(['Survival_label_lower_bound',
|
||||
'Survival_label_upper_bound'], axis=1)
|
||||
dtrain = DaskDMatrix(client, X, label_lower_bound=y_lower_bound,
|
||||
label_upper_bound=y_upper_bound)
|
||||
y_lower_bound = df["Survival_label_lower_bound"]
|
||||
y_upper_bound = df["Survival_label_upper_bound"]
|
||||
X = df.drop(["Survival_label_lower_bound", "Survival_label_upper_bound"], axis=1)
|
||||
dtrain = DaskDMatrix(
|
||||
client, X, label_lower_bound=y_lower_bound, label_upper_bound=y_upper_bound
|
||||
)
|
||||
|
||||
# Use train method from xgboost.dask instead of xgboost. This
|
||||
# distributed version of train returns a dictionary containing the
|
||||
# resulting booster and evaluation history obtained from
|
||||
# evaluation metrics.
|
||||
params = {'verbosity': 1,
|
||||
'objective': 'survival:aft',
|
||||
'eval_metric': 'aft-nloglik',
|
||||
'learning_rate': 0.05,
|
||||
'aft_loss_distribution_scale': 1.20,
|
||||
'aft_loss_distribution': 'normal',
|
||||
'max_depth': 6,
|
||||
'lambda': 0.01,
|
||||
'alpha': 0.02}
|
||||
output = xgb.dask.train(client,
|
||||
params,
|
||||
dtrain,
|
||||
num_boost_round=100,
|
||||
evals=[(dtrain, 'train')])
|
||||
bst = output['booster']
|
||||
history = output['history']
|
||||
params = {
|
||||
"verbosity": 1,
|
||||
"objective": "survival:aft",
|
||||
"eval_metric": "aft-nloglik",
|
||||
"learning_rate": 0.05,
|
||||
"aft_loss_distribution_scale": 1.20,
|
||||
"aft_loss_distribution": "normal",
|
||||
"max_depth": 6,
|
||||
"lambda": 0.01,
|
||||
"alpha": 0.02,
|
||||
}
|
||||
output = xgb.dask.train(
|
||||
client, params, dtrain, num_boost_round=100, evals=[(dtrain, "train")]
|
||||
)
|
||||
bst = output["booster"]
|
||||
history = output["history"]
|
||||
|
||||
# you can pass output directly into `predict` too.
|
||||
prediction = xgb.dask.predict(client, bst, dtrain)
|
||||
print('Evaluation history: ', history)
|
||||
print("Evaluation history: ", history)
|
||||
|
||||
# Uncomment the following line to save the model to the disk
|
||||
# bst.save_model('survival_model.json')
|
||||
@@ -62,7 +64,7 @@ def main(client):
|
||||
return prediction
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
# or use other clusters for scaling
|
||||
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
|
||||
with Client(cluster) as client:
|
||||
|
||||
@@ -25,21 +25,23 @@ def main(client):
|
||||
# distributed version of train returns a dictionary containing the
|
||||
# resulting booster and evaluation history obtained from
|
||||
# evaluation metrics.
|
||||
output = xgb.dask.train(client,
|
||||
{'verbosity': 1,
|
||||
'tree_method': 'hist'},
|
||||
output = xgb.dask.train(
|
||||
client,
|
||||
{"verbosity": 1, "tree_method": "hist"},
|
||||
dtrain,
|
||||
num_boost_round=4, evals=[(dtrain, 'train')])
|
||||
bst = output['booster']
|
||||
history = output['history']
|
||||
num_boost_round=4,
|
||||
evals=[(dtrain, "train")],
|
||||
)
|
||||
bst = output["booster"]
|
||||
history = output["history"]
|
||||
|
||||
# you can pass output directly into `predict` too.
|
||||
prediction = xgb.dask.predict(client, bst, dtrain)
|
||||
print('Evaluation history:', history)
|
||||
print("Evaluation history:", history)
|
||||
return prediction
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
# or use other clusters for scaling
|
||||
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
|
||||
with Client(cluster) as client:
|
||||
|
||||
@@ -13,33 +13,38 @@ from xgboost import dask as dxgb
|
||||
from xgboost.dask import DaskDMatrix
|
||||
|
||||
|
||||
def using_dask_matrix(client: Client, X, y):
|
||||
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
|
||||
# DMatrix scatter around workers.
|
||||
def using_dask_matrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
|
||||
# DaskDMatrix acts like normal DMatrix, works as a proxy for local DMatrix scatter
|
||||
# around workers.
|
||||
dtrain = DaskDMatrix(client, X, y)
|
||||
|
||||
# Use train method from xgboost.dask instead of xgboost. This
|
||||
# distributed version of train returns a dictionary containing the
|
||||
# resulting booster and evaluation history obtained from
|
||||
# evaluation metrics.
|
||||
output = xgb.dask.train(client,
|
||||
{'verbosity': 2,
|
||||
# Use train method from xgboost.dask instead of xgboost. This distributed version
|
||||
# of train returns a dictionary containing the resulting booster and evaluation
|
||||
# history obtained from evaluation metrics.
|
||||
output = xgb.dask.train(
|
||||
client,
|
||||
{
|
||||
"verbosity": 2,
|
||||
"tree_method": "hist",
|
||||
# Golden line for GPU training
|
||||
'tree_method': 'gpu_hist'},
|
||||
"device": "cuda",
|
||||
},
|
||||
dtrain,
|
||||
num_boost_round=4, evals=[(dtrain, 'train')])
|
||||
bst = output['booster']
|
||||
history = output['history']
|
||||
num_boost_round=4,
|
||||
evals=[(dtrain, "train")],
|
||||
)
|
||||
bst = output["booster"]
|
||||
history = output["history"]
|
||||
|
||||
# you can pass output directly into `predict` too.
|
||||
prediction = xgb.dask.predict(client, bst, dtrain)
|
||||
print('Evaluation history:', history)
|
||||
print("Evaluation history:", history)
|
||||
return prediction
|
||||
|
||||
|
||||
def using_quantile_device_dmatrix(client: Client, X, y):
|
||||
"""`DaskQuantileDMatrix` is a data type specialized for `gpu_hist` and `hist` tree
|
||||
methods for reducing memory usage.
|
||||
def using_quantile_device_dmatrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
|
||||
"""`DaskQuantileDMatrix` is a data type specialized for `hist` tree methods for
|
||||
reducing memory usage.
|
||||
|
||||
.. versionadded:: 1.2.0
|
||||
|
||||
@@ -52,17 +57,19 @@ def using_quantile_device_dmatrix(client: Client, X, y):
|
||||
# the `ref` argument of `DaskQuantileDMatrix`.
|
||||
dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
|
||||
output = xgb.dask.train(
|
||||
client, {"verbosity": 2, "tree_method": "gpu_hist"}, dtrain, num_boost_round=4
|
||||
client,
|
||||
{"verbosity": 2, "tree_method": "hist", "device": "cuda"},
|
||||
dtrain,
|
||||
num_boost_round=4,
|
||||
)
|
||||
|
||||
prediction = xgb.dask.predict(client, output, X)
|
||||
return prediction
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
|
||||
# `n_workers` represents the number of GPUs since we use one GPU per worker
|
||||
# process.
|
||||
# `n_workers` represents the number of GPUs since we use one GPU per worker process.
|
||||
with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
|
||||
with Client(cluster) as client:
|
||||
# generate some random data for demonstration
|
||||
@@ -71,7 +78,7 @@ if __name__ == '__main__':
|
||||
X = da.random.random(size=(m, n), chunks=10000)
|
||||
y = da.random.random(size=(m,), chunks=10000)
|
||||
|
||||
print('Using DaskQuantileDMatrix')
|
||||
print("Using DaskQuantileDMatrix")
|
||||
from_ddqdm = using_quantile_device_dmatrix(client, X, y)
|
||||
print('Using DMatrix')
|
||||
print("Using DMatrix")
|
||||
from_dmatrix = using_dask_matrix(client, X, y)
|
||||
|
||||
@@ -21,7 +21,8 @@ def main(client):
|
||||
y = da.random.random(m, partition_size)
|
||||
|
||||
regressor = xgboost.dask.DaskXGBRegressor(verbosity=1)
|
||||
regressor.set_params(tree_method='gpu_hist')
|
||||
# set the device to CUDA
|
||||
regressor.set_params(tree_method="hist", device="cuda")
|
||||
# assigning client here is optional
|
||||
regressor.client = client
|
||||
|
||||
@@ -31,13 +32,13 @@ def main(client):
|
||||
bst = regressor.get_booster()
|
||||
history = regressor.evals_result()
|
||||
|
||||
print('Evaluation history:', history)
|
||||
print("Evaluation history:", history)
|
||||
# returned prediction is always a dask array.
|
||||
assert isinstance(prediction, da.Array)
|
||||
return bst # returning the trained model
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
# With dask cuda, one can scale up XGBoost to arbitrary GPU clusters.
|
||||
# `LocalCUDACluster` used here is only for demonstration purpose.
|
||||
with LocalCUDACluster() as cluster:
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
# GPU Acceleration Demo
|
||||
|
||||
`cover_type.py` shows how to train a model on the [forest cover type](https://archive.ics.uci.edu/ml/datasets/covertype) dataset using GPU acceleration. The forest cover type dataset has 581,012 rows and 54 features, making it time consuming to process. We compare the run-time and accuracy of the GPU and CPU histogram algorithms.
|
||||
|
||||
`shap.ipynb` demonstrates using GPU acceleration to compute SHAP values for feature importance.
|
||||
8
demo/gpu_acceleration/README.rst
Normal file
@@ -0,0 +1,8 @@
|
||||
:orphan:
|
||||
|
||||
GPU Acceleration Demo
|
||||
=====================
|
||||
|
||||
This is a collection of demonstration scripts to showcase the basic usage of GPU. Please
|
||||
see :doc:`/gpu/index` for more info. There are other demonstrations for distributed GPU
|
||||
training using dask or spark.
|
||||
@@ -1,41 +1,49 @@
|
||||
"""
|
||||
Using xgboost on GPU devices
|
||||
============================
|
||||
|
||||
Shows how to train a model on the `forest cover type
|
||||
<https://archive.ics.uci.edu/ml/datasets/covertype>`_ dataset using GPU
|
||||
acceleration. The forest cover type dataset has 581,012 rows and 54 features, making it
|
||||
time consuming to process. We compare the run-time and accuracy of the GPU and CPU
|
||||
histogram algorithms.
|
||||
|
||||
In addition, the demo showcases using the GPU with other GPU-related libraries, including
|
||||
cupy and cuml. These libraries are not strictly required.
|
||||
|
||||
"""
|
||||
import time
|
||||
|
||||
import cupy as cp
|
||||
from cuml.model_selection import train_test_split
|
||||
from sklearn.datasets import fetch_covtype
|
||||
from sklearn.model_selection import train_test_split
|
||||
|
||||
import xgboost as xgb
|
||||
|
||||
# Fetch dataset using sklearn
|
||||
cov = fetch_covtype()
|
||||
X = cov.data
|
||||
y = cov.target
|
||||
X, y = fetch_covtype(return_X_y=True)
|
||||
X = cp.array(X)
|
||||
y = cp.array(y)
|
||||
y -= y.min()
|
||||
|
||||
# Create 0.75/0.25 train/test split
|
||||
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, train_size=0.75,
|
||||
random_state=42)
|
||||
X_train, X_test, y_train, y_test = train_test_split(
|
||||
X, y, test_size=0.25, train_size=0.75, random_state=42
|
||||
)
|
||||
|
||||
# Specify sufficient boosting iterations to reach a minimum
|
||||
num_round = 3000
|
||||
|
||||
# Leave most parameters as default
|
||||
param = {'objective': 'multi:softmax', # Specify multiclass classification
|
||||
'num_class': 8, # Number of possible output classes
|
||||
'tree_method': 'gpu_hist' # Use GPU accelerated algorithm
|
||||
}
|
||||
|
||||
# Convert input data from numpy to XGBoost format
|
||||
dtrain = xgb.DMatrix(X_train, label=y_train)
|
||||
dtest = xgb.DMatrix(X_test, label=y_test)
|
||||
|
||||
gpu_res = {} # Store accuracy result
|
||||
tmp = time.time()
|
||||
clf = xgb.XGBClassifier(device="cuda", n_estimators=num_round)
|
||||
# Train model
|
||||
xgb.train(param, dtrain, num_round, evals=[(dtest, 'test')], evals_result=gpu_res)
|
||||
print("GPU Training Time: %s seconds" % (str(time.time() - tmp)))
|
||||
start = time.time()
|
||||
clf.fit(X_train, y_train, eval_set=[(X_test, y_test)])
|
||||
gpu_res = clf.evals_result()
|
||||
print("GPU Training Time: %s seconds" % (str(time.time() - start)))
|
||||
|
||||
# Repeat for CPU algorithm
|
||||
tmp = time.time()
|
||||
param['tree_method'] = 'hist'
|
||||
cpu_res = {}
|
||||
xgb.train(param, dtrain, num_round, evals=[(dtest, 'test')], evals_result=cpu_res)
|
||||
print("CPU Training Time: %s seconds" % (str(time.time() - tmp)))
|
||||
clf = xgb.XGBClassifier(device="cpu", n_estimators=num_round)
|
||||
start = time.time()
|
||||
clf.fit(X_train, y_train, eval_set=[(X_test, y_test)])
cpu_res = clf.evals_result()
|
||||
print("CPU Training Time: %s seconds" % (str(time.time() - start)))
|
||||
|
||||
File diff suppressed because one or more lines are too long
55
demo/gpu_acceleration/tree_shap.py
Normal file
@@ -0,0 +1,55 @@
|
||||
"""
|
||||
Use GPU to speedup SHAP value computation
|
||||
=========================================
|
||||
|
||||
Demonstrates using GPU acceleration to compute SHAP values for feature importance.
|
||||
|
||||
"""
|
||||
import shap
|
||||
from sklearn.datasets import fetch_california_housing
|
||||
|
||||
import xgboost as xgb
|
||||
|
||||
# Fetch dataset using sklearn
|
||||
data = fetch_california_housing()
|
||||
print(data.DESCR)
|
||||
X = data.data
|
||||
y = data.target
|
||||
|
||||
num_round = 500
|
||||
|
||||
param = {
|
||||
"eta": 0.05,
|
||||
"max_depth": 10,
|
||||
"tree_method": "hist",
|
||||
"device": "cuda",
|
||||
}
|
||||
|
||||
# GPU accelerated training
|
||||
dtrain = xgb.DMatrix(X, label=y, feature_names=data.feature_names)
|
||||
model = xgb.train(param, dtrain, num_round)
|
||||
|
||||
# Compute shap values using GPU with xgboost
|
||||
model.set_param({"device": "cuda"})
|
||||
shap_values = model.predict(dtrain, pred_contribs=True)
|
||||
|
||||
# Compute shap interaction values using GPU
|
||||
shap_interaction_values = model.predict(dtrain, pred_interactions=True)
|
||||
|
||||
|
||||
# shap will call the GPU accelerated version as long as the device parameter is set to
|
||||
# "cuda"
|
||||
explainer = shap.TreeExplainer(model)
|
||||
shap_values = explainer.shap_values(X)
|
||||
|
||||
# visualize the first prediction's explanation
|
||||
shap.force_plot(
|
||||
explainer.expected_value,
|
||||
shap_values[0, :],
|
||||
X[0, :],
|
||||
feature_names=data.feature_names,
|
||||
matplotlib=True,
|
||||
)
|
||||
|
||||
# Show a summary of feature importance
|
||||
shap.summary_plot(shap_values, X, plot_type="bar", feature_names=data.feature_names)
|
||||
@@ -1,9 +1,9 @@
|
||||
'''
|
||||
"""
|
||||
Demo for using and defining callback functions
|
||||
==============================================
|
||||
|
||||
.. versionadded:: 1.3.0
|
||||
'''
|
||||
"""
|
||||
import argparse
|
||||
import os
|
||||
import tempfile
|
||||
@@ -17,10 +17,11 @@ import xgboost as xgb
|
||||
|
||||
|
||||
class Plotting(xgb.callback.TrainingCallback):
|
||||
'''Plot evaluation result during training. Only for demonstration purpose as it's quite
|
||||
"""Plot evaluation result during training. Only for demonstration purpose as it's quite
|
||||
slow to draw.
|
||||
|
||||
'''
|
||||
"""
|
||||
|
||||
def __init__(self, rounds):
|
||||
self.fig = plt.figure()
|
||||
self.ax = self.fig.add_subplot(111)
|
||||
@@ -31,16 +32,16 @@ class Plotting(xgb.callback.TrainingCallback):
|
||||
plt.ion()
|
||||
|
||||
def _get_key(self, data, metric):
|
||||
return f'{data}-{metric}'
|
||||
return f"{data}-{metric}"
|
||||
|
||||
def after_iteration(self, model, epoch, evals_log):
|
||||
'''Update the plot.'''
|
||||
"""Update the plot."""
|
||||
if not self.lines:
|
||||
for data, metric in evals_log.items():
|
||||
for metric_name, log in metric.items():
|
||||
key = self._get_key(data, metric_name)
|
||||
expanded = log + [0] * (self.rounds - len(log))
|
||||
self.lines[key], = self.ax.plot(self.x, expanded, label=key)
|
||||
(self.lines[key],) = self.ax.plot(self.x, expanded, label=key)
|
||||
self.ax.legend()
|
||||
else:
|
||||
# https://pythonspot.com/matplotlib-update-plot/
|
||||
@@ -55,8 +56,8 @@ class Plotting(xgb.callback.TrainingCallback):
|
||||
|
||||
|
||||
def custom_callback():
|
||||
'''Demo for defining a custom callback function that plots evaluation result during
|
||||
training.'''
|
||||
"""Demo for defining a custom callback function that plots evaluation result during
|
||||
training."""
|
||||
X, y = load_breast_cancer(return_X_y=True)
|
||||
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
|
||||
|
||||
@@ -69,14 +70,16 @@ def custom_callback():
|
||||
# Pass it to the `callbacks` parameter as a list.
|
||||
xgb.train(
|
||||
{
|
||||
'objective': 'binary:logistic',
|
||||
'eval_metric': ['error', 'rmse'],
|
||||
'tree_method': 'gpu_hist'
|
||||
"objective": "binary:logistic",
|
||||
"eval_metric": ["error", "rmse"],
|
||||
"tree_method": "hist",
|
||||
"device": "cuda",
|
||||
},
|
||||
D_train,
|
||||
evals=[(D_train, 'Train'), (D_valid, 'Valid')],
|
||||
evals=[(D_train, "Train"), (D_valid, "Valid")],
|
||||
num_boost_round=num_boost_round,
|
||||
callbacks=[plotting])
|
||||
callbacks=[plotting],
|
||||
)
|
||||
|
||||
|
||||
def check_point_callback():
|
||||
@@ -89,10 +92,10 @@ def check_point_callback():
|
||||
if i == 0:
|
||||
continue
|
||||
if as_pickle:
|
||||
path = os.path.join(tmpdir, 'model_' + str(i) + '.pkl')
|
||||
path = os.path.join(tmpdir, "model_" + str(i) + ".pkl")
|
||||
else:
|
||||
path = os.path.join(tmpdir, 'model_' + str(i) + '.json')
|
||||
assert(os.path.exists(path))
|
||||
path = os.path.join(tmpdir, "model_" + str(i) + ".json")
|
||||
assert os.path.exists(path)
|
||||
|
||||
X, y = load_breast_cancer(return_X_y=True)
|
||||
m = xgb.DMatrix(X, y)
|
||||
@@ -100,31 +103,36 @@ def check_point_callback():
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
# Use callback class from xgboost.callback
|
||||
# Feel free to subclass/customize it to suit your need.
|
||||
check_point = xgb.callback.TrainingCheckPoint(directory=tmpdir,
|
||||
iterations=rounds,
|
||||
name='model')
|
||||
xgb.train({'objective': 'binary:logistic'}, m,
|
||||
check_point = xgb.callback.TrainingCheckPoint(
|
||||
directory=tmpdir, iterations=rounds, name="model"
|
||||
)
|
||||
xgb.train(
|
||||
{"objective": "binary:logistic"},
|
||||
m,
|
||||
num_boost_round=10,
|
||||
verbose_eval=False,
|
||||
callbacks=[check_point])
|
||||
callbacks=[check_point],
|
||||
)
|
||||
check(False)
|
||||
|
||||
# This version of checkpoint saves everything including parameters and
|
||||
# model. See: doc/tutorials/saving_model.rst
|
||||
check_point = xgb.callback.TrainingCheckPoint(directory=tmpdir,
|
||||
iterations=rounds,
|
||||
as_pickle=True,
|
||||
name='model')
|
||||
xgb.train({'objective': 'binary:logistic'}, m,
|
||||
check_point = xgb.callback.TrainingCheckPoint(
|
||||
directory=tmpdir, iterations=rounds, as_pickle=True, name="model"
|
||||
)
|
||||
xgb.train(
|
||||
{"objective": "binary:logistic"},
|
||||
m,
|
||||
num_boost_round=10,
|
||||
verbose_eval=False,
|
||||
callbacks=[check_point])
|
||||
callbacks=[check_point],
|
||||
)
|
||||
check(True)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--plot', default=1, type=int)
|
||||
parser.add_argument("--plot", default=1, type=int)
|
||||
args = parser.parse_args()
|
||||
|
||||
check_point_callback()
|
||||
|
||||
@@ -63,7 +63,8 @@ def load_cat_in_the_dat() -> tuple[pd.DataFrame, pd.Series]:
|
||||
|
||||
|
||||
params = {
|
||||
"tree_method": "gpu_hist",
|
||||
"tree_method": "hist",
|
||||
"device": "cuda",
|
||||
"n_estimators": 32,
|
||||
"colsample_bylevel": 0.7,
|
||||
}
|
||||
|
||||
@@ -58,13 +58,13 @@ def main() -> None:
|
||||
# Specify `enable_categorical` to True, also we use onehot encoding based split
|
||||
# here for demonstration. For details see the document of `max_cat_to_onehot`.
|
||||
reg = xgb.XGBRegressor(
|
||||
tree_method="gpu_hist", enable_categorical=True, max_cat_to_onehot=5
|
||||
tree_method="hist", enable_categorical=True, max_cat_to_onehot=5, device="cuda"
|
||||
)
|
||||
reg.fit(X, y, eval_set=[(X, y)])
|
||||
|
||||
# Pass in already encoded data
|
||||
X_enc, y_enc = make_categorical(100, 10, 4, True)
|
||||
reg_enc = xgb.XGBRegressor(tree_method="gpu_hist")
|
||||
reg_enc = xgb.XGBRegressor(tree_method="hist", device="cuda")
|
||||
reg_enc.fit(X_enc, y_enc, eval_set=[(X_enc, y_enc)])
|
||||
|
||||
reg_results = np.array(reg.evals_result()["validation_0"]["rmse"])
|
||||
|
||||
@@ -22,7 +22,10 @@ import xgboost
|
||||
|
||||
|
||||
def make_batches(
|
||||
n_samples_per_batch: int, n_features: int, n_batches: int, tmpdir: str,
|
||||
n_samples_per_batch: int,
|
||||
n_features: int,
|
||||
n_batches: int,
|
||||
tmpdir: str,
|
||||
) -> List[Tuple[str, str]]:
|
||||
files: List[Tuple[str, str]] = []
|
||||
rng = np.random.RandomState(1994)
|
||||
@@ -38,6 +41,7 @@ def make_batches(
|
||||
|
||||
class Iterator(xgboost.DataIter):
|
||||
"""A custom iterator for loading files in batches."""
|
||||
|
||||
def __init__(self, file_paths: List[Tuple[str, str]]):
|
||||
self._file_paths = file_paths
|
||||
self._it = 0
|
||||
@@ -82,10 +86,11 @@ def main(tmpdir: str) -> xgboost.Booster:
|
||||
missing = np.NaN
|
||||
Xy = xgboost.DMatrix(it, missing=missing, enable_categorical=False)
|
||||
|
||||
# Other tree methods including ``hist`` and ``gpu_hist`` also work, see tutorial in
|
||||
# ``approx`` is also supported, but less efficient due to sketching. GPU behaves
|
||||
# differently than CPU tree methods as it uses a hybrid approach. See tutorial in
|
||||
# doc for details.
|
||||
booster = xgboost.train(
|
||||
{"tree_method": "approx", "max_depth": 2},
|
||||
{"tree_method": "hist", "max_depth": 4},
|
||||
Xy,
|
||||
evals=[(Xy, "Train")],
|
||||
num_boost_round=10,
|
||||
|
||||
214
demo/guide-python/learning_to_rank.py
Normal file
@@ -0,0 +1,214 @@
|
||||
"""
|
||||
Getting started with learning to rank
|
||||
=====================================
|
||||
|
||||
.. versionadded:: 2.0.0
|
||||
|
||||
This is a demonstration of using XGBoost for learning to rank tasks using the
|
||||
MSLR_10k_letor dataset. For more information about the dataset, please visit its
|
||||
`description page <https://www.microsoft.com/en-us/research/project/mslr/>`_.
|
||||
|
||||
This is a two-part demo: the first part contains a basic example of using XGBoost to
train on relevance degree, and the second part simulates click data and enables
|
||||
position debiasing training.
|
||||
|
||||
For an overview of learning to rank in XGBoost, please see
|
||||
:doc:`Learning to Rank </tutorials/learning_to_rank>`.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import pickle as pkl
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from sklearn.datasets import load_svmlight_file
|
||||
|
||||
import xgboost as xgb
|
||||
from xgboost.testing.data import RelDataCV, simulate_clicks, sort_ltr_samples
|
||||
|
||||
|
||||
def load_mlsr_10k(data_path: str, cache_path: str) -> RelDataCV:
|
||||
"""Load the MSLR10k dataset from data_path and cache a pickle object in cache_path.
|
||||
|
||||
Returns
|
||||
-------
|
||||
|
||||
A list of tuples [(X, y, qid), ...].
|
||||
|
||||
"""
|
||||
root_path = os.path.expanduser(data_path)
cacheroot_path = os.path.expanduser(cache_path)
|
||||
cache_path = os.path.join(cacheroot_path, "MSLR_10K_LETOR.pkl")
|
||||
|
||||
# Use only the Fold1 for demo:
|
||||
# Train, Valid, Test
|
||||
# {S1,S2,S3}, S4, S5
|
||||
fold = 1
|
||||
|
||||
if not os.path.exists(cache_path):
|
||||
fold_path = os.path.join(root_path, f"Fold{fold}")
|
||||
train_path = os.path.join(fold_path, "train.txt")
|
||||
valid_path = os.path.join(fold_path, "vali.txt")
|
||||
test_path = os.path.join(fold_path, "test.txt")
|
||||
X_train, y_train, qid_train = load_svmlight_file(
|
||||
train_path, query_id=True, dtype=np.float32
|
||||
)
|
||||
y_train = y_train.astype(np.int32)
|
||||
qid_train = qid_train.astype(np.int32)
|
||||
|
||||
X_valid, y_valid, qid_valid = load_svmlight_file(
|
||||
valid_path, query_id=True, dtype=np.float32
|
||||
)
|
||||
y_valid = y_valid.astype(np.int32)
|
||||
qid_valid = qid_valid.astype(np.int32)
|
||||
|
||||
X_test, y_test, qid_test = load_svmlight_file(
|
||||
test_path, query_id=True, dtype=np.float32
|
||||
)
|
||||
y_test = y_test.astype(np.int32)
|
||||
qid_test = qid_test.astype(np.int32)
|
||||
|
||||
data = RelDataCV(
|
||||
train=(X_train, y_train, qid_train),
|
||||
test=(X_test, y_test, qid_test),
|
||||
max_rel=4,
|
||||
)
|
||||
|
||||
with open(cache_path, "wb") as fd:
|
||||
pkl.dump(data, fd)
|
||||
|
||||
with open(cache_path, "rb") as fd:
|
||||
data = pkl.load(fd)
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def ranking_demo(args: argparse.Namespace) -> None:
|
||||
"""Demonstration for learning to rank with relevance degree."""
|
||||
data = load_mlsr_10k(args.data, args.cache)
|
||||
|
||||
# Sort data according to query index
|
||||
X_train, y_train, qid_train = data.train
|
||||
sorted_idx = np.argsort(qid_train)
|
||||
X_train = X_train[sorted_idx]
|
||||
y_train = y_train[sorted_idx]
|
||||
qid_train = qid_train[sorted_idx]
|
||||
|
||||
X_test, y_test, qid_test = data.test
|
||||
sorted_idx = np.argsort(qid_test)
|
||||
X_test = X_test[sorted_idx]
|
||||
y_test = y_test[sorted_idx]
|
||||
qid_test = qid_test[sorted_idx]
|
||||
|
||||
ranker = xgb.XGBRanker(
|
||||
tree_method="hist",
|
||||
device="cuda",
|
||||
lambdarank_pair_method="topk",
|
||||
lambdarank_num_pair_per_sample=13,
|
||||
eval_metric=["ndcg@1", "ndcg@8"],
|
||||
)
|
||||
ranker.fit(
|
||||
X_train,
|
||||
y_train,
|
||||
qid=qid_train,
|
||||
eval_set=[(X_test, y_test)],
|
||||
eval_qid=[qid_test],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
|
||||
def click_data_demo(args: argparse.Namespace) -> None:
|
||||
"""Demonstration for learning to rank with click data."""
|
||||
data = load_mlsr_10k(args.data, args.cache)
|
||||
train, test = simulate_clicks(data)
|
||||
assert test is not None
|
||||
|
||||
assert train.X.shape[0] == train.click.size
|
||||
assert test.X.shape[0] == test.click.size
|
||||
assert test.score.dtype == np.float32
|
||||
assert test.click.dtype == np.int32
|
||||
|
||||
X_train, clicks_train, y_train, qid_train = sort_ltr_samples(
|
||||
train.X,
|
||||
train.y,
|
||||
train.qid,
|
||||
train.click,
|
||||
train.pos,
|
||||
)
|
||||
X_test, clicks_test, y_test, qid_test = sort_ltr_samples(
|
||||
test.X,
|
||||
test.y,
|
||||
test.qid,
|
||||
test.click,
|
||||
test.pos,
|
||||
)
|
||||
|
||||
class ShowPosition(xgb.callback.TrainingCallback):
|
||||
def after_iteration(
|
||||
self,
|
||||
model: xgb.Booster,
|
||||
epoch: int,
|
||||
evals_log: xgb.callback.TrainingCallback.EvalsLog,
|
||||
) -> bool:
|
||||
config = json.loads(model.save_config())
|
||||
ti_plus = np.array(config["learner"]["objective"]["ti+"])
|
||||
tj_minus = np.array(config["learner"]["objective"]["tj-"])
|
||||
df = pd.DataFrame({"ti+": ti_plus, "tj-": tj_minus})
|
||||
print(df)
|
||||
return False
|
||||
|
||||
ranker = xgb.XGBRanker(
|
||||
n_estimators=512,
|
||||
tree_method="hist",
|
||||
device="cuda",
|
||||
learning_rate=0.01,
|
||||
reg_lambda=1.5,
|
||||
subsample=0.8,
|
||||
sampling_method="gradient_based",
|
||||
# LTR specific parameters
|
||||
objective="rank:ndcg",
|
||||
# - Enable bias estimation
|
||||
lambdarank_unbiased=True,
|
||||
# - normalization (1 / (norm + 1))
|
||||
lambdarank_bias_norm=1,
|
||||
# - Focus on the top 12 documents
|
||||
lambdarank_num_pair_per_sample=12,
|
||||
lambdarank_pair_method="topk",
|
||||
ndcg_exp_gain=True,
|
||||
eval_metric=["ndcg@1", "ndcg@3", "ndcg@5", "ndcg@10"],
|
||||
callbacks=[ShowPosition()],
|
||||
)
|
||||
ranker.fit(
|
||||
X_train,
|
||||
clicks_train,
|
||||
qid=qid_train,
|
||||
eval_set=[(X_test, y_test), (X_test, clicks_test)],
|
||||
eval_qid=[qid_test, qid_test],
|
||||
verbose=True,
|
||||
)
|
||||
ranker.predict(X_test)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Demonstration of learning to rank using XGBoost."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--data",
|
||||
type=str,
|
||||
help="Root directory of the MSLR-WEB10K data.",
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
"--cache",
|
||||
type=str,
|
||||
help="Directory for caching processed data.",
|
||||
required=True,
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
ranking_demo(args)
|
||||
click_data_demo(args)
|
||||
@@ -28,17 +28,18 @@ BATCHES = 32
|
||||
|
||||
|
||||
class IterForDMatrixDemo(xgboost.core.DataIter):
|
||||
'''A data iterator for XGBoost DMatrix.
|
||||
"""A data iterator for XGBoost DMatrix.
|
||||
|
||||
`reset` and `next` are required for any data iterator, other functions here
|
||||
are utilities for demonstration purposes.
|
||||
|
||||
'''
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
'''Generate some random data for demostration.
|
||||
"""Generate some random data for demostration.
|
||||
|
||||
Actual data can be anything that is currently supported by XGBoost.
|
||||
'''
|
||||
"""
|
||||
self.rows = ROWS_PER_BATCH
|
||||
self.cols = COLS
|
||||
rng = cupy.random.RandomState(1994)
|
||||
@@ -59,27 +60,26 @@ class IterForDMatrixDemo(xgboost.core.DataIter):
|
||||
return cupy.concatenate(self._weights)
|
||||
|
||||
def data(self):
|
||||
'''Utility function for obtaining current batch of data.'''
|
||||
"""Utility function for obtaining current batch of data."""
|
||||
return self._data[self.it]
|
||||
|
||||
def labels(self):
|
||||
'''Utility function for obtaining current batch of label.'''
|
||||
"""Utility function for obtaining current batch of label."""
|
||||
return self._labels[self.it]
|
||||
|
||||
def weights(self):
|
||||
return self._weights[self.it]
|
||||
|
||||
def reset(self):
|
||||
'''Reset the iterator'''
|
||||
"""Reset the iterator"""
|
||||
self.it = 0
|
||||
|
||||
def next(self, input_data):
|
||||
'''Yield next batch of data.'''
|
||||
"""Yield next batch of data."""
|
||||
if self.it == len(self._data):
|
||||
# Return 0 when there's no more batch.
|
||||
return 0
|
||||
input_data(data=self.data(), label=self.labels(),
|
||||
weight=self.weights())
|
||||
input_data(data=self.data(), label=self.labels(), weight=self.weights())
|
||||
self.it += 1
|
||||
return 1
|
||||
|
||||
@@ -103,18 +103,19 @@ def main():
|
||||
|
||||
assert m_with_it.num_col() == m.num_col()
|
||||
assert m_with_it.num_row() == m.num_row()
|
||||
# Tree meethod must be one of the `hist` or `gpu_hist`. We use `gpu_hist` for GPU
|
||||
# input here.
|
||||
# Tree method must be `hist`.
|
||||
reg_with_it = xgboost.train(
|
||||
{"tree_method": "gpu_hist"}, m_with_it, num_boost_round=rounds
|
||||
{"tree_method": "hist", "device": "cuda"}, m_with_it, num_boost_round=rounds
|
||||
)
|
||||
predict_with_it = reg_with_it.predict(m_with_it)
|
||||
|
||||
reg = xgboost.train({"tree_method": "gpu_hist"}, m, num_boost_round=rounds)
|
||||
reg = xgboost.train(
|
||||
{"tree_method": "hist", "device": "cuda"}, m, num_boost_round=rounds
|
||||
)
|
||||
predict = reg.predict(m)
|
||||
|
||||
numpy.testing.assert_allclose(predict_with_it, predict, rtol=1e6)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
@@ -7,6 +7,11 @@ Quantile Regression
|
||||
The script is inspired by this awesome example in sklearn:
|
||||
https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
|
||||
|
||||
.. note::
|
||||
|
||||
The feature is only supported using the Python package. In addition, quantile
|
||||
crossing can happen due to a limitation in the algorithm.
|
||||
|
||||
"""
|
||||
import argparse
|
||||
from typing import Dict
|
||||
|
||||
@@ -24,7 +24,7 @@ def main():
|
||||
Xy = xgb.DMatrix(X_train, y_train)
|
||||
evals_result: xgb.callback.EvaluationMonitor.EvalsLog = {}
|
||||
booster = xgb.train(
|
||||
{"tree_method": "gpu_hist", "max_depth": 6},
|
||||
{"tree_method": "hist", "max_depth": 6, "device": "cuda"},
|
||||
Xy,
|
||||
num_boost_round=n_rounds,
|
||||
evals=[(Xy, "Train")],
|
||||
@@ -87,7 +87,7 @@ def main():
|
||||
np.testing.assert_allclose(
|
||||
np.array(prune_result["Original"]["rmse"]),
|
||||
np.array(prune_result["Train"]["rmse"]),
|
||||
atol=1e-5
|
||||
atol=1e-5,
|
||||
)
|
||||
|
||||
|
||||
|
||||
1
demo/nvflare/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
!config
|
||||
23
demo/nvflare/config/config_fed_client.json
Normal file
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"format_version": 2,
|
||||
"executors": [
|
||||
{
|
||||
"tasks": [
|
||||
"train"
|
||||
],
|
||||
"executor": {
|
||||
"path": "trainer.XGBoostTrainer",
|
||||
"args": {
|
||||
"server_address": "localhost:9091",
|
||||
"world_size": 2,
|
||||
"server_cert_path": "server-cert.pem",
|
||||
"client_key_path": "client-key.pem",
|
||||
"client_cert_path": "client-cert.pem",
|
||||
"use_gpus": false
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"task_result_filters": [],
|
||||
"task_data_filters": []
|
||||
}
|
||||
22
demo/nvflare/config/config_fed_server.json
Normal file
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"format_version": 2,
|
||||
"server": {
|
||||
"heart_beat_timeout": 600
|
||||
},
|
||||
"task_data_filters": [],
|
||||
"task_result_filters": [],
|
||||
"workflows": [
|
||||
{
|
||||
"id": "server_workflow",
|
||||
"path": "controller.XGBoostController",
|
||||
"args": {
|
||||
"port": 9091,
|
||||
"world_size": 2,
|
||||
"server_key_path": "server-key.pem",
|
||||
"server_cert_path": "server-cert.pem",
|
||||
"client_cert_path": "client-cert.pem"
|
||||
}
|
||||
}
|
||||
],
|
||||
"components": []
|
||||
}
|
||||
@@ -6,7 +6,7 @@ This directory contains a demo of Horizontal Federated Learning using
|
||||
## Training with CPU only
|
||||
|
||||
To run the demo, first build XGBoost with the federated learning plugin enabled (see the
|
||||
[README](../../plugin/federated/README.md)).
|
||||
[README](../../../plugin/federated/README.md)).
|
||||
|
||||
Install NVFlare (note that currently NVFlare only supports Python 3.8):
|
||||
```shell
|
||||
|
||||
@@ -70,8 +70,7 @@ class XGBoostTrainer(Executor):
|
||||
param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
|
||||
if self._use_gpus:
|
||||
self.log_info(fl_ctx, f'Training with GPU {rank}')
|
||||
param['tree_method'] = 'gpu_hist'
|
||||
param['gpu_id'] = rank
|
||||
param['device'] = f"cuda:{rank}"
|
||||
|
||||
# Specify validations set to watch performance
|
||||
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
|
||||
|
||||
@@ -16,7 +16,7 @@ split -n l/${world_size} --numeric-suffixes=1 -a 1 ../../data/agaricus.txt.test
|
||||
|
||||
nvflare poc -n 2 --prepare
|
||||
mkdir -p /tmp/nvflare/poc/admin/transfer/horizontal-xgboost
|
||||
cp -fr config custom /tmp/nvflare/poc/admin/transfer/horizontal-xgboost
|
||||
cp -fr ../config custom /tmp/nvflare/poc/admin/transfer/horizontal-xgboost
|
||||
cp server-*.pem client-cert.pem /tmp/nvflare/poc/server/
|
||||
for (( site=1; site<=world_size; site++ )); do
|
||||
cp server-cert.pem client-*.pem /tmp/nvflare/poc/site-"$site"/
|
||||
|
||||
@@ -6,7 +6,7 @@ This directory contains a demo of Vertical Federated Learning using
|
||||
## Training with CPU only
|
||||
|
||||
To run the demo, first build XGBoost with the federated learning plugin enabled (see the
|
||||
[README](../../plugin/federated/README.md)).
|
||||
[README](../../../plugin/federated/README.md)).
|
||||
|
||||
Install NVFlare (note that currently NVFlare only supports Python 3.8):
|
||||
```shell
|
||||
|
||||
@@ -16,7 +16,7 @@ class SupportedTasks(object):
|
||||
|
||||
class XGBoostTrainer(Executor):
|
||||
def __init__(self, server_address: str, world_size: int, server_cert_path: str,
|
||||
client_key_path: str, client_cert_path: str):
|
||||
client_key_path: str, client_cert_path: str, use_gpus: bool):
|
||||
"""Trainer for federated XGBoost.
|
||||
|
||||
Args:
|
||||
@@ -32,6 +32,7 @@ class XGBoostTrainer(Executor):
|
||||
self._server_cert_path = server_cert_path
|
||||
self._client_key_path = client_key_path
|
||||
self._client_cert_path = client_cert_path
|
||||
self._use_gpus = use_gpus
|
||||
|
||||
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext,
|
||||
abort_signal: Signal) -> Shareable:
|
||||
@@ -81,6 +82,8 @@ class XGBoostTrainer(Executor):
|
||||
'objective': 'binary:logistic',
|
||||
'eval_metric': 'auc',
|
||||
}
|
||||
if self._use_gpus:
|
||||
self.log_info(fl_ctx, 'GPUs are not currently supported by vertical federated XGBoost')
|
||||
|
||||
# specify validations set to watch performance
|
||||
watchlist = [(dtest, "eval"), (dtrain, "train")]
|
||||
|
||||
@@ -56,7 +56,7 @@ fi
|
||||
|
||||
nvflare poc -n 2 --prepare
|
||||
mkdir -p /tmp/nvflare/poc/admin/transfer/vertical-xgboost
|
||||
cp -fr config custom /tmp/nvflare/poc/admin/transfer/vertical-xgboost
|
||||
cp -fr ../config custom /tmp/nvflare/poc/admin/transfer/vertical-xgboost
|
||||
cp server-*.pem client-cert.pem /tmp/nvflare/poc/server/
|
||||
for (( site=1; site<=world_size; site++ )); do
|
||||
cp server-cert.pem client-*.pem /tmp/nvflare/poc/site-"${site}"/
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
Using XGBoost with RAPIDS Memory Manager (RMM) plugin (EXPERIMENTAL)
|
||||
====================================================================
|
||||
[RAPIDS Memory Manager (RMM)](https://github.com/rapidsai/rmm) library provides a collection of
|
||||
efficient memory allocators for NVIDIA GPUs. It is now possible to use XGBoost with memory
|
||||
allocators provided by RMM, by enabling the RMM integration plugin.
|
||||
|
||||
The demos in this directory highlights one RMM allocator in particular: **the pool sub-allocator**.
|
||||
This allocator addresses the slow speed of `cudaMalloc()` by allocating a large chunk of memory
|
||||
upfront. Subsequent allocations will draw from the pool of already allocated memory and thus avoid
|
||||
the overhead of calling `cudaMalloc()` directly. See
|
||||
[this GTC talk slides](https://on-demand.gputechconf.com/gtc/2015/presentation/S5530-Stephen-Jones.pdf)
|
||||
for more details.
|
||||
|
||||
Before running the demos, ensure that XGBoost is compiled with the RMM plugin enabled. To do this,
|
||||
run CMake with option `-DPLUGIN_RMM=ON` (`-DUSE_CUDA=ON` also required):
|
||||
```
|
||||
cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON
|
||||
make -j4
|
||||
```
|
||||
CMake will attempt to locate the RMM library in your build environment. You may choose to build
|
||||
RMM from the source, or install it using the Conda package manager. If CMake cannot find RMM, you
|
||||
should specify the location of RMM with the CMake prefix:
|
||||
```
|
||||
# If using Conda:
|
||||
cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
|
||||
# If using RMM installed with a custom location
|
||||
cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON -DCMAKE_PREFIX_PATH=/path/to/rmm
|
||||
```
|
||||
|
||||
# Informing XGBoost about RMM pool
|
||||
|
||||
When XGBoost is compiled with RMM, most of the large size allocation will go through RMM
|
||||
allocators, but some small allocations in performance critical areas are using a different
|
||||
caching allocator so that we can have better control over memory allocation behavior.
|
||||
Users can override this behavior and force the use of rmm for all allocations by setting
|
||||
the global configuration ``use_rmm``:
|
||||
|
||||
``` python
|
||||
with xgb.config_context(use_rmm=True):
|
||||
clf = xgb.XGBClassifier(tree_method="gpu_hist")
|
||||
```
|
||||
|
||||
Depending on the choice of memory pool size or type of allocator, this may have negative
|
||||
performance impact.
|
||||
|
||||
* [Using RMM with a single GPU](./rmm_singlegpu.py)
|
||||
* [Using RMM with a local Dask cluster consisting of multiple GPUs](./rmm_mgpu_with_dask.py)
|
||||
51
demo/rmm_plugin/README.rst
Normal file
@@ -0,0 +1,51 @@
|
||||
Using XGBoost with RAPIDS Memory Manager (RMM) plugin (EXPERIMENTAL)
|
||||
====================================================================
|
||||
|
||||
`RAPIDS Memory Manager (RMM) <https://github.com/rapidsai/rmm>`__ library provides a
|
||||
collection of efficient memory allocators for NVIDIA GPUs. It is now possible to use
|
||||
XGBoost with memory allocators provided by RMM, by enabling the RMM integration plugin.
|
||||
|
||||
The demos in this directory highlight one RMM allocator in particular: **the pool
|
||||
sub-allocator**. This allocator addresses the slow speed of ``cudaMalloc()`` by
|
||||
allocating a large chunk of memory upfront. Subsequent allocations will draw from the pool
|
||||
of already allocated memory and thus avoid the overhead of calling ``cudaMalloc()``
|
||||
directly. See `this GTC talk slides
|
||||
<https://on-demand.gputechconf.com/gtc/2015/presentation/S5530-Stephen-Jones.pdf>`_ for
|
||||
more details.
|
||||
|
||||
Before running the demos, ensure that XGBoost is compiled with the RMM plugin enabled. To do this,
|
||||
run CMake with option ``-DPLUGIN_RMM=ON`` (``-DUSE_CUDA=ON`` also required):
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON
|
||||
make -j$(nproc)
|
||||
|
||||
CMake will attempt to locate the RMM library in your build environment. You may choose to build
|
||||
RMM from the source, or install it using the Conda package manager. If CMake cannot find RMM, you
|
||||
should specify the location of RMM with the CMake prefix:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
# If using Conda:
|
||||
cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
|
||||
# If using RMM installed with a custom location
|
||||
cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON -DCMAKE_PREFIX_PATH=/path/to/rmm
|
||||
|
||||
********************************
|
||||
Informing XGBoost about RMM pool
|
||||
********************************
|
||||
|
||||
When XGBoost is compiled with RMM, most of the large size allocation will go through RMM
|
||||
allocators, but some small allocations in performance critical areas are using a different
|
||||
caching allocator so that we can have better control over memory allocation behavior.
|
||||
Users can override this behavior and force the use of rmm for all allocations by setting
|
||||
the global configuration ``use_rmm``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
with xgb.config_context(use_rmm=True):
|
||||
clf = xgb.XGBClassifier(tree_method="hist", device="cuda")
|
||||
|
||||
Depending on the choice of memory pool size or type of allocator, this may have negative
|
||||
performance impact.
|
||||
@@ -1,3 +1,7 @@
|
||||
"""
|
||||
Using rmm with Dask
|
||||
===================
|
||||
"""
|
||||
import dask
|
||||
from dask.distributed import Client
|
||||
from dask_cuda import LocalCUDACluster
|
||||
@@ -11,25 +15,33 @@ def main(client):
|
||||
# xgb.set_config(use_rmm=True)
|
||||
|
||||
X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
|
||||
# In pratice one should prefer loading the data with dask collections instead of using
|
||||
# `from_array`.
|
||||
# In practice one should prefer loading the data with dask collections instead of
|
||||
# using `from_array`.
|
||||
X = dask.array.from_array(X)
|
||||
y = dask.array.from_array(y)
|
||||
dtrain = xgb.dask.DaskDMatrix(client, X, label=y)
|
||||
|
||||
params = {'max_depth': 8, 'eta': 0.01, 'objective': 'multi:softprob', 'num_class': 3,
|
||||
'tree_method': 'gpu_hist', 'eval_metric': 'merror'}
|
||||
output = xgb.dask.train(client, params, dtrain, num_boost_round=100,
|
||||
evals=[(dtrain, 'train')])
|
||||
bst = output['booster']
|
||||
history = output['history']
|
||||
for i, e in enumerate(history['train']['merror']):
|
||||
print(f'[{i}] train-merror: {e}')
|
||||
params = {
|
||||
"max_depth": 8,
|
||||
"eta": 0.01,
|
||||
"objective": "multi:softprob",
|
||||
"num_class": 3,
|
||||
"tree_method": "hist",
|
||||
"eval_metric": "merror",
|
||||
"device": "cuda",
|
||||
}
|
||||
output = xgb.dask.train(
|
||||
client, params, dtrain, num_boost_round=100, evals=[(dtrain, "train")]
|
||||
)
|
||||
bst = output["booster"]
|
||||
history = output["history"]
|
||||
for i, e in enumerate(history["train"]["merror"]):
|
||||
print(f"[{i}] train-merror: {e}")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# To use RMM pool allocator with a GPU Dask cluster, just add rmm_pool_size option to
|
||||
# LocalCUDACluster constructor.
|
||||
with LocalCUDACluster(rmm_pool_size='2GB') as cluster:
|
||||
if __name__ == "__main__":
|
||||
# To use RMM pool allocator with a GPU Dask cluster, just add rmm_pool_size option
|
||||
# to LocalCUDACluster constructor.
|
||||
with LocalCUDACluster(rmm_pool_size="2GB") as cluster:
|
||||
with Client(cluster) as client:
|
||||
main(client)
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
"""
|
||||
Using rmm on a single node device
|
||||
=================================
|
||||
"""
|
||||
import rmm
|
||||
from sklearn.datasets import make_classification
|
||||
|
||||
@@ -16,7 +20,8 @@ params = {
|
||||
"eta": 0.01,
|
||||
"objective": "multi:softprob",
|
||||
"num_class": 3,
|
||||
"tree_method": "gpu_hist",
|
||||
"tree_method": "hist",
|
||||
"device": "cuda",
|
||||
}
|
||||
# XGBoost will automatically use the RMM pool allocator
|
||||
bst = xgb.train(params, dtrain, num_boost_round=100, evals=[(dtrain, "train")])
|
||||
|
||||
2
doc/.gitignore
vendored
@@ -6,3 +6,5 @@ doxygen
|
||||
parser.py
|
||||
*.pyc
|
||||
web-data
|
||||
# generated by doxygen
|
||||
tmp
|
||||
@@ -1,70 +1,76 @@
|
||||
# Understand your dataset with XGBoost
|
||||
|
||||
Understand your dataset with XGBoost
|
||||
====================================
|
||||
## Introduction
|
||||
|
||||
Introduction
|
||||
------------
|
||||
The purpose of this vignette is to show you how to use **XGBoost** to
|
||||
discover and understand your own dataset better.
|
||||
|
||||
The purpose of this Vignette is to show you how to use **XGBoost** to discover and understand your own dataset better.
|
||||
|
||||
This Vignette is not about predicting anything (see [XGBoost presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)). We will explain how to use **XGBoost** to highlight the *link* between the *features* of your data and the *outcome*.
|
||||
This vignette is not about predicting anything (see [XGBoost
|
||||
presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)).
|
||||
We will explain how to use **XGBoost** to highlight the *link* between
|
||||
the *features* of your data and the *outcome*.
|
||||
|
||||
Package loading:
|
||||
|
||||
|
||||
```r
|
||||
require(xgboost)
|
||||
require(Matrix)
|
||||
require(data.table)
|
||||
if (!require('vcd')) install.packages('vcd')
|
||||
```
|
||||
if (!require('vcd')) {
|
||||
install.packages('vcd')
|
||||
}
|
||||
|
||||
> The **VCD** package is used only for one of its embedded datasets.
|
||||
|
||||
Preparation of the dataset
|
||||
--------------------------
|
||||
|
||||
### Numeric VS categorical variables
|
||||
## Preparation of the dataset
|
||||
|
||||
### Numeric v.s. categorical variables
|
||||
|
||||
**XGBoost** manages only `numeric` vectors.
|
||||
|
||||
What to do when you have *categorical* data?
|
||||
|
||||
A *categorical* variable has a fixed number of different values. For instance, if a variable called *Colour* can have only one of these three values, *red*, *blue* or *green*, then *Colour* is a *categorical* variable.
|
||||
A *categorical* variable has a fixed number of different values. For
|
||||
instance, if a variable called *Colour* can have only one of these three
|
||||
values, *red*, *blue* or *green*, then *Colour* is a *categorical*
|
||||
variable.
|
||||
|
||||
> In **R**, a *categorical* variable is called `factor`.
|
||||
>
|
||||
> Type `?factor` in the console for more information.
|
||||
|
||||
To answer the question above we will convert *categorical* variables to `numeric` one.
|
||||
To answer the question above we will convert *categorical* variables to
|
||||
`numeric` ones.
|
||||
|
||||
### Conversion from categorical to numeric variables
|
||||
|
||||
#### Looking at the raw data
|
||||
|
||||
In this Vignette we will see how to transform a *dense* `data.frame` (*dense* = few zeroes in the matrix) with *categorical* variables to a very *sparse* matrix (*sparse* = lots of zero in the matrix) of `numeric` features.
|
||||
In this Vignette we will see how to transform a *dense* `data.frame`
|
||||
(*dense* = the majority of the matrix is non-zero) with *categorical*
|
||||
variables to a very *sparse* matrix (*sparse* = lots of zero entries in
|
||||
the matrix) of `numeric` features.
|
||||
|
||||
The method we are going to see is usually called [one-hot encoding](http://en.wikipedia.org/wiki/One-hot).
|
||||
The method we are going to see is usually called [one-hot
|
||||
encoding](https://en.wikipedia.org/wiki/One-hot).
|
||||
|
||||
The first step is to load `Arthritis` dataset in memory and wrap it with `data.table` package.
|
||||
The first step is to load the `Arthritis` dataset in memory and wrap it
|
||||
with the `data.table` package.
|
||||
|
||||
|
||||
```r
|
||||
data(Arthritis)
|
||||
df <- data.table(Arthritis, keep.rownames = FALSE)
|
||||
```
|
||||
|
||||
> `data.table` is 100% compliant with **R** `data.frame` but its syntax is more consistent and its performance for large dataset is [best in class](http://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly) (`dplyr` from **R** and `Pandas` from **Python** [included](https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping)). Some parts of **XGBoost** **R** package use `data.table`.
|
||||
> `data.table` is 100% compliant with **R** `data.frame` but its syntax
|
||||
> is more consistent and its performance for large dataset is [best in
|
||||
> class](https://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly)
|
||||
> (`dplyr` from **R** and `Pandas` from **Python**
|
||||
> [included](https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping)).
|
||||
> Some parts of **XGBoost’s** **R** package use `data.table`.
|
||||
|
||||
The first thing we want to do is to have a look to the first lines of the `data.table`:
|
||||
The first thing we want to do is to have a look at the first few lines
of the `data.table`:
|
||||
|
||||
|
||||
```r
|
||||
head(df)
|
||||
```
|
||||
|
||||
```
|
||||
## ID Treatment Sex Age Improved
|
||||
## 1: 57 Treated Male 27 Some
|
||||
## 2: 46 Treated Male 29 None
|
||||
@@ -72,16 +78,11 @@ head(df)
|
||||
## 4: 17 Treated Male 32 Marked
|
||||
## 5: 36 Treated Male 46 Marked
|
||||
## 6: 23 Treated Male 58 Marked
|
||||
```
|
||||
|
||||
Now we will check the format of each column.
|
||||
|
||||
|
||||
```r
|
||||
str(df)
|
||||
```
|
||||
|
||||
```
|
||||
## Classes 'data.table' and 'data.frame': 84 obs. of 5 variables:
|
||||
## $ ID : int 57 46 77 17 36 23 75 39 33 55 ...
|
||||
## $ Treatment: Factor w/ 2 levels "Placebo","Treated": 2 2 2 2 2 2 2 2 2 2 ...
|
||||
@@ -89,14 +90,14 @@ str(df)
|
||||
## $ Age : int 27 29 30 32 46 58 59 59 63 63 ...
|
||||
## $ Improved : Ord.factor w/ 3 levels "None"<"Some"<..: 2 1 1 3 3 3 1 3 1 1 ...
|
||||
## - attr(*, ".internal.selfref")=<externalptr>
|
||||
```
|
||||
|
||||
2 columns have `factor` type, one has `ordinal` type.
|
||||
|
||||
> `ordinal` variable :
|
||||
>
|
||||
> * can take a limited number of values (like `factor`) ;
|
||||
> * these values are ordered (unlike `factor`). Here these ordered values are: `Marked > Some > None`
|
||||
> - can take a limited number of values (like `factor`) ;
|
||||
> - these values are ordered (unlike `factor`). Here these ordered
|
||||
> values are: `Marked > Some > None`
|
||||
|
||||
#### Creation of new features based on old ones
|
||||
|
||||
@@ -104,18 +105,16 @@ We will add some new *categorical* features to see if it helps.
|
||||
|
||||
##### Grouping per 10 years
|
||||
|
||||
For the first feature we create groups of age by rounding the real age.
|
||||
For the first feature we create groups of age by rounding the real age.
|
||||
|
||||
Note that we transform it to `factor` so the algorithm treat these age groups as independent values.
|
||||
Note that we transform it to `factor` so the algorithm treats these age
|
||||
groups as independent values.
|
||||
|
||||
Therefore, 20 is not closer to 30 than 60. To make it short, the distance between ages is lost in this transformation.
|
||||
Therefore, 20 is not closer to 30 than 60. In other words, the distance
|
||||
between ages is lost in this transformation.
|
||||
|
||||
|
||||
```r
|
||||
head(df[, AgeDiscret := as.factor(round(Age / 10, 0))])
|
||||
```
|
||||
|
||||
```
|
||||
## ID Treatment Sex Age Improved AgeDiscret
|
||||
## 1: 57 Treated Male 27 Some 3
|
||||
## 2: 46 Treated Male 29 None 3
|
||||
@@ -123,18 +122,17 @@ head(df[,AgeDiscret := as.factor(round(Age/10,0))])
|
||||
## 4: 17 Treated Male 32 Marked 3
|
||||
## 5: 36 Treated Male 46 Marked 5
|
||||
## 6: 23 Treated Male 58 Marked 6
|
||||
```
|
||||
|
||||
##### Random split in two groups
|
||||
##### Randomly split into two groups
|
||||
|
||||
Following is an even stronger simplification of the real age with an arbitrary split at 30 years old. I choose this value **based on nothing**. We will see later if simplifying the information based on arbitrary values is a good strategy (you may already have an idea of how well it will work...).
|
||||
The following is an even stronger simplification of the real age with an
|
||||
arbitrary split at 30 years old. I choose this value **based on
|
||||
nothing**. We will see later if simplifying the information based on
|
||||
arbitrary values is a good strategy (you may already have an idea of how
|
||||
well it will work…).
|
||||
|
||||
|
||||
```r
|
||||
head(df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))])
|
||||
```
|
||||
|
||||
```
|
||||
## ID Treatment Sex Age Improved AgeDiscret AgeCat
|
||||
## 1: 57 Treated Male 27 Some 3 Young
|
||||
## 2: 46 Treated Male 29 None 3 Young
|
||||
@@ -142,330 +140,336 @@ head(df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))])
|
||||
## 4: 17 Treated Male 32 Marked 3 Old
|
||||
## 5: 36 Treated Male 46 Marked 5 Old
|
||||
## 6: 23 Treated Male 58 Marked 6 Old
|
||||
```
|
||||
|
||||
##### Risks in adding correlated features
|
||||
|
||||
These new features are highly correlated to the `Age` feature because they are simple transformations of this feature.
|
||||
These new features are highly correlated to the `Age` feature because
|
||||
they are simple transformations of this feature.
|
||||
|
||||
For many machine learning algorithms, using correlated features is not a good idea. It may sometimes make prediction less accurate, and most of the time make interpretation of the model almost impossible. GLM, for instance, assumes that the features are uncorrelated.
|
||||
For many machine learning algorithms, using correlated features is not a
|
||||
good idea. It may sometimes make prediction less accurate, and most of
|
||||
the time make interpretation of the model almost impossible. GLM, for
|
||||
instance, assumes that the features are uncorrelated.
|
||||
|
||||
Fortunately, decision tree algorithms (including boosted trees) are very robust to these features. Therefore we have nothing to do to manage this situation.
|
||||
Fortunately, decision tree algorithms (including boosted trees) are very
|
||||
robust to these features. Therefore we don’t have to do anything to
|
||||
manage this situation.
|
||||
|
||||
##### Cleaning data
|
||||
|
||||
We remove ID as there is nothing to learn from this feature (it would just add some noise).
|
||||
We remove ID as there is nothing to learn from this feature (it would
|
||||
just add some noise).
|
||||
|
||||
|
||||
```r
|
||||
df[, ID := NULL]
|
||||
```
|
||||
|
||||
We will list the different values for the column `Treatment`:
|
||||
|
||||
|
||||
```r
|
||||
levels(df[, Treatment])
|
||||
```
|
||||
|
||||
```
|
||||
## [1] "Placebo" "Treated"
|
||||
```
|
||||
|
||||
|
||||
#### One-hot encoding
|
||||
#### Encoding categorical features
|
||||
|
||||
Next step, we will transform the categorical data to dummy variables.
|
||||
This is the [one-hot encoding](http://en.wikipedia.org/wiki/One-hot) step.
|
||||
Several encoding methods exist, e.g., [one-hot
|
||||
encoding](https://en.wikipedia.org/wiki/One-hot) is a common approach.
|
||||
We will use the [dummy contrast
|
||||
coding](https://stats.oarc.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/)
|
||||
which is popular because it produces “full rank” encoding (also see
|
||||
[this blog post by Max
|
||||
Kuhn](http://appliedpredictivemodeling.com/blog/2013/10/23/the-basics-of-encoding-categorical-data-for-predictive-models)).
|
||||
|
||||
The purpose is to transform each value of each *categorical* feature in a *binary* feature `{0, 1}`.
|
||||
The purpose is to transform each value of each *categorical* feature
|
||||
into a *binary* feature `{0, 1}`.
|
||||
|
||||
For example, the column `Treatment` will be replaced by two columns, `Placebo`, and `Treated`. Each of them will be *binary*. Therefore, an observation which has the value `Placebo` in column `Treatment` before the transformation will have after the transformation the value `1` in the new column `Placebo` and the value `0` in the new column `Treated`. The column `Treatment` will disappear during the one-hot encoding.
|
||||
For example, the column `Treatment` will be replaced by two columns,
|
||||
`TreatmentPlacebo`, and `TreatmentTreated`. Each of them will be
|
||||
*binary*. Therefore, an observation which has the value `Placebo` in
|
||||
column `Treatment` before the transformation will have the value `1` in
|
||||
the new column `TreatmentPlacebo` and the value `0` in the new column
|
||||
`TreatmentTreated` after the transformation. The column
|
||||
`TreatmentPlacebo` will disappear during the contrast encoding, as it
|
||||
would be absorbed into a common constant intercept column.
|
||||
|
||||
Column `Improved` is excluded because it will be our `label` column, the one we want to predict.
|
||||
Column `Improved` is excluded because it will be our `label` column, the
|
||||
one we want to predict.
|
||||
|
||||
|
||||
```r
|
||||
sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df)
|
||||
sparse_matrix <- sparse.model.matrix(Improved ~ ., data = df)[, -1]
|
||||
head(sparse_matrix)
|
||||
```
|
||||
|
||||
```
|
||||
## 6 x 10 sparse Matrix of class "dgCMatrix"
|
||||
##
|
||||
## 1 . 1 1 27 1 . . . . 1
|
||||
## 2 . 1 1 29 1 . . . . 1
|
||||
## 3 . 1 1 30 1 . . . . 1
|
||||
## 4 . 1 1 32 1 . . . . .
|
||||
## 5 . 1 1 46 . . 1 . . .
|
||||
## 6 . 1 1 58 . . . 1 . .
|
||||
```
|
||||
## 6 x 9 sparse Matrix of class "dgCMatrix"
|
||||
## TreatmentTreated SexMale Age AgeDiscret3 AgeDiscret4 AgeDiscret5 AgeDiscret6
|
||||
## 1 1 1 27 1 . . .
|
||||
## 2 1 1 29 1 . . .
|
||||
## 3 1 1 30 1 . . .
|
||||
## 4 1 1 32 1 . . .
|
||||
## 5 1 1 46 . . 1 .
|
||||
## 6 1 1 58 . . . 1
|
||||
## AgeDiscret7 AgeCatYoung
|
||||
## 1 . 1
|
||||
## 2 . 1
|
||||
## 3 . 1
|
||||
## 4 . .
|
||||
## 5 . .
|
||||
## 6 . .
|
||||
|
||||
> Formulae `Improved~.-1` used above means transform all *categorical* features but column `Improved` to binary values. The `-1` is here to remove the first column which is full of `1` (this column is generated by the conversion). For more information, you can type `?sparse.model.matrix` in the console.
|
||||
> Formula `Improved ~ .` used above means transform all *categorical*
|
||||
> features but column `Improved` to binary values. The `-1` column
|
||||
> selection removes the intercept column which is full of `1` (this
|
||||
> column is generated by the conversion). For more information, you can
|
||||
> type `?sparse.model.matrix` in the console.
|
||||
|
||||
Create the output `numeric` vector (not as a sparse `Matrix`):
|
||||
|
||||
|
||||
```r
|
||||
output_vector = df[,Improved] == "Marked"
|
||||
```
|
||||
output_vector <- df[, Improved] == "Marked"
|
||||
|
||||
1. set `Y` vector to `0`;
|
||||
2. set `Y` to `1` for rows where `Improved == Marked` is `TRUE` ;
|
||||
3. return `Y` vector.
|
||||
|
||||
Build the model
|
||||
---------------
|
||||
## Build the model
|
||||
|
||||
The code below is very usual. For more information, you can look at the documentation of `xgboost` function (or at the vignette [XGBoost presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)).
|
||||
The code below is very usual. For more information, you can look at the
|
||||
documentation of `xgboost` function (or at the vignette [XGBoost
|
||||
presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)).
|
||||
|
||||
|
||||
```r
|
||||
bst <- xgboost(data = sparse_matrix, label = output_vector, max.depth = 4,
|
||||
bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 4,
|
||||
eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")
|
||||
```
|
||||
|
||||
```
|
||||
## [0] train-error:0.202381
|
||||
## [1] train-error:0.166667
|
||||
## [2] train-error:0.166667
|
||||
## [3] train-error:0.166667
|
||||
## [4] train-error:0.154762
|
||||
## [5] train-error:0.154762
|
||||
## [6] train-error:0.154762
|
||||
## [7] train-error:0.166667
|
||||
## [8] train-error:0.166667
|
||||
## [9] train-error:0.166667
|
||||
```
|
||||
## [1] train-logloss:0.485466
|
||||
## [2] train-logloss:0.438534
|
||||
## [3] train-logloss:0.412250
|
||||
## [4] train-logloss:0.395828
|
||||
## [5] train-logloss:0.384264
|
||||
## [6] train-logloss:0.374028
|
||||
## [7] train-logloss:0.365005
|
||||
## [8] train-logloss:0.351233
|
||||
## [9] train-logloss:0.341678
|
||||
## [10] train-logloss:0.334465
|
||||
|
||||
You can see some `train-error: 0.XXXXX` lines followed by a number. It decreases. Each line shows how well the model explains your data. Lower is better.
|
||||
You can see some `train-logloss: 0.XXXXX` lines followed by a number. It
|
||||
decreases. Each line shows how well the model explains the data. Lower
|
||||
is better.
|
||||
|
||||
A model which fits too well may [overfit](http://en.wikipedia.org/wiki/Overfitting) (meaning it copy/paste too much the past, and won't be that good to predict the future).
|
||||
A small value for training error may be a symptom of
|
||||
[overfitting](https://en.wikipedia.org/wiki/Overfitting), meaning the
|
||||
model will not accurately predict unseen values.
|
||||
|
||||
> Here you can see the numbers decrease until line 7 and then increase.
|
||||
>
|
||||
> It probably means we are overfitting. To fix that I should reduce the number of rounds to `nrounds = 4`. I will let things like that because I don't really care for the purpose of this example :-)
|
||||
|
||||
Feature importance
|
||||
------------------
|
||||
## Feature importance
|
||||
|
||||
## Measure feature importance
|
||||
|
||||
|
||||
### Build the feature importance data.table
|
||||
|
||||
In the code below, `sparse_matrix@Dimnames[[2]]` represents the column names of the sparse matrix. These names are the original values of the features (remember, each binary column == one value of one *categorical* feature).
|
||||
Remember, each binary column corresponds to a single value of one of the
*categorical* features.
|
||||
|
||||
|
||||
```r
|
||||
importance <- xgb.importance(feature_names = sparse_matrix@Dimnames[[2]], model = bst)
|
||||
importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
|
||||
head(importance)
|
||||
```
|
||||
|
||||
```
|
||||
## Feature Gain Cover Frequency
|
||||
## 1: Age 0.622031651 0.67251706 0.67241379
|
||||
## 2: TreatmentPlacebo 0.285750607 0.11916656 0.10344828
|
||||
## 3: SexMale 0.048744054 0.04522027 0.08620690
|
||||
## 4: AgeDiscret6 0.016604647 0.04784637 0.05172414
|
||||
## 5: AgeDiscret3 0.016373791 0.08028939 0.05172414
|
||||
## 6: AgeDiscret4 0.009270558 0.02858801 0.01724138
|
||||
```
|
||||
## 1: Age 0.622031769 0.67251696 0.67241379
|
||||
## 2: TreatmentTreated 0.285750540 0.11916651 0.10344828
|
||||
## 3: SexMale 0.048744022 0.04522028 0.08620690
|
||||
## 4: AgeDiscret6 0.016604639 0.04784639 0.05172414
|
||||
## 5: AgeDiscret3 0.016373781 0.08028951 0.05172414
|
||||
## 6: AgeDiscret4 0.009270557 0.02858801 0.01724138
|
||||
|
||||
> The column `Gain` provide the information we are looking for.
|
||||
> The column `Gain` provides the information we are looking for.
|
||||
>
|
||||
> As you can see, features are classified by `Gain`.
|
||||
|
||||
`Gain` is the improvement in accuracy brought by a feature to the branches it is on. The idea is that before adding a new split on a feature X to the branch there was some wrongly classified elements, after adding the split on this feature, there are two new branches, and each of these branch is more accurate (one branch saying if your observation is on this branch then it should be classified as `1`, and the other branch saying the exact opposite).
|
||||
`Gain` is the improvement in accuracy brought by a feature to the
|
||||
branches it is on. The idea is that before adding a new split on a
|
||||
feature X to the branch there were some wrongly classified elements;
|
||||
after adding the split on this feature, there are two new branches, and
|
||||
each of these branches is more accurate (one branch saying if your
|
||||
observation is on this branch then it should be classified as `1`, and
|
||||
the other branch saying the exact opposite).
|
||||
|
||||
`Cover` measures the relative quantity of observations concerned by a feature.
|
||||
`Cover` is related to the second order derivative (or Hessian) of the
|
||||
loss function with respect to a particular variable; thus, a large value
|
||||
indicates a variable has a large potential impact on the loss function
|
||||
and so is important.
|
||||
|
||||
`Frequency` is a simpler way to measure the `Gain`. It just counts the number of times a feature is used in all generated trees. You should not use it (unless you know why you want to use it).
|
||||
|
||||
#### Improvement in the interpretability of feature importance data.table
|
||||
|
||||
We can go deeper in the analysis of the model. In the `data.table` above, we have discovered which features counts to predict if the illness will go or not. But we don't yet know the role of these features. For instance, one of the question we may want to answer would be: does receiving a placebo treatment helps to recover from the illness?
|
||||
|
||||
One simple solution is to count the co-occurrences of a feature and a class of the classification.
|
||||
|
||||
For that purpose we will execute the same function as above but using two more parameters, `data` and `label`.
|
||||
|
||||
|
||||
```r
|
||||
importanceRaw <- xgb.importance(feature_names = sparse_matrix@Dimnames[[2]], model = bst, data = sparse_matrix, label = output_vector)
|
||||
|
||||
# Cleaning for better display
|
||||
importanceClean <- importanceRaw[,`:=`(Cover=NULL, Frequency=NULL)]
|
||||
|
||||
head(importanceClean)
|
||||
```
|
||||
|
||||
```
|
||||
## Feature Split Gain RealCover RealCover %
|
||||
## 1: TreatmentPlacebo -1.00136e-05 0.28575061 7 0.2500000
|
||||
## 2: Age 61.5 0.16374034 12 0.4285714
|
||||
## 3: Age 39 0.08705750 8 0.2857143
|
||||
## 4: Age 57.5 0.06947553 11 0.3928571
|
||||
## 5: SexMale -1.00136e-05 0.04874405 4 0.1428571
|
||||
## 6: Age 53.5 0.04620627 10 0.3571429
|
||||
```
|
||||
|
||||
> In the table above we have removed two not needed columns and select only the first lines.
|
||||
|
||||
First thing you notice is the new column `Split`. It is the split applied to the feature on a branch of one of the tree. Each split is present, therefore a feature can appear several times in this table. Here we can see the feature `Age` is used several times with different splits.
|
||||
|
||||
How the split is applied to count the co-occurrences? It is always `<`. For instance, in the second line, we measure the number of persons under 61.5 years with the illness gone after the treatment.
|
||||
|
||||
The two other new columns are `RealCover` and `RealCover %`. In the first column it measures the number of observations in the dataset where the split is respected and the label marked as `1`. The second column is the percentage of the whole population that `RealCover` represents.
|
||||
|
||||
Therefore, according to our findings, getting a placebo doesn't seem to help but being younger than 61 years may help (seems logic).
|
||||
|
||||
> You may wonder how to interpret the `< 1.00001` on the first line. Basically, in a sparse `Matrix`, there is no `0`, therefore, looking for one hot-encoded categorical observations validating the rule `< 1.00001` is like just looking for `1` for this feature.
|
||||
`Frequency` is a simpler way to measure the `Gain`. It just counts the
|
||||
number of times a feature is used in all generated trees. You should not
|
||||
use it (unless you know why you want to use it).
|
||||
|
||||
### Plotting the feature importance
|
||||
|
||||
All these things are nice, but it would be even better to plot the
|
||||
results.
|
||||
|
||||
All these things are nice, but it would be even better to plot the results.
|
||||
xgb.plot.importance(importance_matrix = importance)
|
||||
|
||||
<img src="discoverYourData_files/figure-markdown_strict/unnamed-chunk-12-1.png" style="display: block; margin: auto;" />
|
||||
|
||||
```r
|
||||
xgb.plot.importance(importance_matrix = importanceRaw)
|
||||
```
|
||||
Running this line of code, you should get a bar chart showing the
|
||||
importance of the 6 features (containing the same data as the output we
|
||||
saw earlier, but displaying it visually for easier consumption). Note
|
||||
that `xgb.ggplot.importance` is also available for all the ggplot2 fans!
|
||||
|
||||
```
|
||||
## Error in xgb.plot.importance(importance_matrix = importanceRaw): Importance matrix is not correct (column names issue)
|
||||
```
|
||||
> Depending on the dataset and the learning parameters you may have more
> than two clusters. The default value is to limit them to `10`, but you can
> increase this limit. Look at the function documentation for more
> information.
|
||||
|
||||
Feature have automatically been divided in 2 clusters: the interesting features... and the others.
|
||||
According to the plot above, the most important features in this dataset
|
||||
to predict if the treatment will work are :
|
||||
|
||||
> Depending of the dataset and the learning parameters you may have more than two clusters. Default value is to limit them to `10`, but you can increase this limit. Look at the function documentation for more information.
|
||||
|
||||
According to the plot above, the most important features in this dataset to predict if the treatment will work are :
|
||||
|
||||
* the Age ;
|
||||
* having received a placebo or not ;
|
||||
* the sex is third but already included in the not interesting features group ;
|
||||
* then we see our generated features (AgeDiscret). We can see that their contribution is very low.
|
||||
- An individual’s age;
|
||||
- Having received a placebo or not;
|
||||
- Gender;
|
||||
- Our generated feature AgeDiscret. We can see that its contribution
|
||||
is very low.
|
||||
|
||||
### Do these results make sense?
|
||||
|
||||
|
||||
Let's check some **Chi2** between each of these features and the label.
|
||||
Let’s check some **Chi2** between each of these features and the label.
|
||||
|
||||
Higher **Chi2** means better correlation.
|
||||
|
||||
|
||||
```r
|
||||
c2 <- chisq.test(df$Age, output_vector)
|
||||
print(c2)
|
||||
```
|
||||
|
||||
```
|
||||
##
|
||||
## Pearson's Chi-squared test
|
||||
##
|
||||
## data: df$Age and output_vector
|
||||
## X-squared = 35.475, df = 35, p-value = 0.4458
|
||||
```
|
||||
|
||||
Pearson correlation between Age and illness disappearing is **35.48**.
|
||||
The Pearson correlation between Age and illness disappearing is
|
||||
**35.47**.
|
||||
|
||||
|
||||
```r
|
||||
c2 <- chisq.test(df$AgeDiscret, output_vector)
|
||||
print(c2)
|
||||
```
|
||||
|
||||
```
|
||||
##
|
||||
## Pearson's Chi-squared test
|
||||
##
|
||||
## data: df$AgeDiscret and output_vector
|
||||
## X-squared = 8.2554, df = 5, p-value = 0.1427
|
||||
```
|
||||
|
||||
Our first simplification of Age gives a Pearson correlation is **8.26**.
|
||||
Our first simplification of Age gives a Pearson correlation of **8.26**.
|
||||
|
||||
|
||||
```r
|
||||
c2 <- chisq.test(df$AgeCat, output_vector)
|
||||
print(c2)
|
||||
```
|
||||
|
||||
```
|
||||
##
|
||||
## Pearson's Chi-squared test with Yates' continuity correction
|
||||
##
|
||||
## data: df$AgeCat and output_vector
|
||||
## X-squared = 2.3571, df = 1, p-value = 0.1247
|
||||
```
|
||||
|
||||
The perfectly random split I did between young and old at 30 years old have a low correlation of **2.36**. It's a result we may expect as may be in my mind > 30 years is being old (I am 32 and starting feeling old, this may explain that), but for the illness we are studying, the age to be vulnerable is not the same.
|
||||
The perfectly random split we did between young and old at 30 years old
|
||||
has a low correlation of **2.36**. This suggests that, for the
|
||||
particular illness we are studying, the age at which someone is
|
||||
vulnerable to this disease is likely very different from 30.
|
||||
|
||||
Morality: don't let your *gut* lower the quality of your model.
|
||||
Moral of the story: don’t let your *gut* lower the quality of your
|
||||
model.
|
||||
|
||||
In *data science* expression, there is the word *science* :-)
|
||||
In *data science*, there is the word *science* :-)
|
||||
|
||||
Conclusion
|
||||
----------
|
||||
## Conclusion
|
||||
|
||||
As you can see, in general *destroying information by simplifying it won't improve your model*. **Chi2** just demonstrates that.
|
||||
As you can see, in general *destroying information by simplifying it
|
||||
won’t improve your model*. **Chi2** just demonstrates that.
|
||||
|
||||
But in more complex cases, creating a new feature based on existing one which makes link with the outcome more obvious may help the algorithm and improve the model.
|
||||
But in more complex cases, creating a new feature from an existing one
|
||||
may help the algorithm and improve the model.
|
||||
|
||||
The case studied here is not enough complex to show that. Check [Kaggle website](http://www.kaggle.com/) for some challenging datasets. However it's almost always worse when you add some arbitrary rules.
|
||||
The case studied here is not complex enough to show that. Check [Kaggle
|
||||
website](https://www.kaggle.com/) for some challenging datasets.
|
||||
|
||||
Moreover, you can notice that even if we have added some not useful new features highly correlated with other features, the boosting tree algorithm have been able to choose the best one, which in this case is the Age.
|
||||
Moreover, you can see that even if we have added some new features which
|
||||
are not very useful/highly correlated with other features, the boosting
|
||||
tree algorithm was still able to choose the best one (which in this case
|
||||
is the Age).
|
||||
|
||||
Linear models may not be that smart in this scenario.
|
||||
Linear models may not perform as well.
|
||||
|
||||
Special Note: What about Random Forests™?
|
||||
-----------------------------------------
|
||||
## Special Note: What about Random Forests™?
|
||||
|
||||
As you may know, [Random Forests](http://en.wikipedia.org/wiki/Random_forest) algorithm is cousin with boosting and both are part of the [ensemble learning](http://en.wikipedia.org/wiki/Ensemble_learning) family.
|
||||
As you may know, the [Random
|
||||
Forests](https://en.wikipedia.org/wiki/Random_forest) algorithm is
|
||||
cousin with boosting and both are part of the [ensemble
|
||||
learning](https://en.wikipedia.org/wiki/Ensemble_learning) family.
|
||||
|
||||
Both train several decision trees for one dataset. The *main* difference is that in Random Forests, trees are independent and in boosting, the tree `N+1` focus its learning on the loss (<=> what has not been well modeled by the tree `N`).
|
||||
Both train several decision trees for one dataset. The *main* difference
|
||||
is that in Random Forests, trees are independent and in boosting, the
|
||||
`N+1`-st tree focuses its learning on the loss (<=> what has not
|
||||
been well modeled by the tree `N`).
|
||||
|
||||
This difference have an impact on a corner case in feature importance analysis: the *correlated features*.
|
||||
This difference can have an impact on an edge case in feature importance
analysis: *correlated features*.
|
||||
|
||||
Imagine two features perfectly correlated, feature `A` and feature `B`. For one specific tree, if the algorithm needs one of them, it will choose randomly (true in both boosting and Random Forests).
|
||||
Imagine two features perfectly correlated, feature `A` and feature `B`.
|
||||
For one specific tree, if the algorithm needs one of them, it will
|
||||
choose randomly (true in both boosting and Random Forests).
|
||||
|
||||
However, in Random Forests this random choice will be done for each tree, because each tree is independent from the others. Therefore, approximatively, depending of your parameters, 50% of the trees will choose feature `A` and the other 50% will choose feature `B`. So the *importance* of the information contained in `A` and `B` (which is the same, because they are perfectly correlated) is diluted in `A` and `B`. So you won't easily know this information is important to predict what you want to predict! It is even worse when you have 10 correlated features...
|
||||
However, in Random Forests this random choice will be done for each
|
||||
tree, because each tree is independent from the others. Therefore,
|
||||
approximately (and depending on your parameters) 50% of the trees will
|
||||
choose feature `A` and the other 50% will choose feature `B`. So the
|
||||
*importance* of the information contained in `A` and `B` (which is the
|
||||
same, because they are perfectly correlated) is diluted in `A` and `B`.
|
||||
So you won’t easily know this information is important to predict what
|
||||
you want to predict! It is even worse when you have 10 correlated
|
||||
features…
|
||||
|
||||
In boosting, when a specific link between feature and outcome have been learned by the algorithm, it will try to not refocus on it (in theory it is what happens, reality is not always that simple). Therefore, all the importance will be on feature `A` or on feature `B` (but not both). You will know that one feature have an important role in the link between the observations and the label. It is still up to you to search for the correlated features to the one detected as important if you need to know all of them.
|
||||
In boosting, when a specific link between feature and outcome has been
learned by the algorithm, it will try to not refocus on it (in theory it
|
||||
is what happens, reality is not always that simple). Therefore, all the
|
||||
importance will be on feature `A` or on feature `B` (but not both). You
|
||||
will know that one feature has an important role in the link between the
|
||||
observations and the label. It is still up to you to search for the
|
||||
correlated features to the one detected as important if you need to know
|
||||
all of them.
|
||||
|
||||
If you want to try Random Forests algorithm, you can tweak XGBoost parameters!
|
||||
If you want to try Random Forests algorithm, you can tweak XGBoost
|
||||
parameters!
|
||||
|
||||
**Warning**: this is still an experimental parameter.
|
||||
For instance, to compute a model with 1000 trees, with a 0.5 factor on
|
||||
sampling rows and columns:
|
||||
|
||||
For instance, to compute a model with 1000 trees, with a 0.5 factor on sampling rows and columns:
|
||||
|
||||
|
||||
```r
|
||||
data(agaricus.train, package = 'xgboost')
|
||||
data(agaricus.test, package = 'xgboost')
|
||||
train <- agaricus.train
|
||||
test <- agaricus.test
|
||||
|
||||
#Random Forest - 1000 trees
|
||||
bst <- xgboost(data = train$data, label = train$label, max.depth = 4, num_parallel_tree = 1000, subsample = 0.5, colsample_bytree =0.5, nrounds = 1, objective = "binary:logistic")
|
||||
```
|
||||
bst <- xgboost(
|
||||
data = train$data
|
||||
, label = train$label
|
||||
, max_depth = 4
|
||||
, num_parallel_tree = 1000
|
||||
, subsample = 0.5
|
||||
, colsample_bytree = 0.5
|
||||
, nrounds = 1
|
||||
, objective = "binary:logistic"
|
||||
)
|
||||
|
||||
```
|
||||
## [0] train-error:0.002150
|
||||
```
|
||||
## [1] train-logloss:0.456201
|
||||
|
||||
```r
|
||||
#Boosting - 3 rounds
|
||||
bst <- xgboost(data = train$data, label = train$label, max.depth = 4, nrounds = 3, objective = "binary:logistic")
|
||||
```
|
||||
bst <- xgboost(
|
||||
data = train$data
|
||||
, label = train$label
|
||||
, max_depth = 4
|
||||
, nrounds = 3
|
||||
, objective = "binary:logistic"
|
||||
)
|
||||
|
||||
```
|
||||
## [0] train-error:0.006142
|
||||
## [1] train-error:0.006756
|
||||
## [2] train-error:0.001228
|
||||
```
|
||||
## [1] train-logloss:0.444882
|
||||
## [2] train-logloss:0.302428
|
||||
## [3] train-logloss:0.212847
|
||||
|
||||
> Note that the parameter `round` is set to `1`.
|
||||
|
||||
> [**Random Forests**](https://www.stat.berkeley.edu/~breiman/RandomForests/cc_papers.htm) is a trademark of Leo Breiman and Adele Cutler and is licensed exclusively to Salford Systems for the commercial release of the software.
|
||||
> [**Random
|
||||
> Forests**](https://www.stat.berkeley.edu/~breiman/RandomForests/cc_papers.htm)
|
||||
> is a trademark of Leo Breiman and Adele Cutler and is licensed
|
||||
> exclusively to Salford Systems for the commercial release of the
|
||||
> software.
|
||||
|
||||
@@ -119,7 +119,7 @@ An up-to-date version of the CUDA toolkit is required.
|
||||
|
||||
.. note:: Checking your compiler version
|
||||
|
||||
CUDA is really picky about supported compilers, a table for the compatible compilers for the latests CUDA version on Linux can be seen `here <https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html>`_.
|
||||
CUDA is really picky about supported compilers, a table for the compatible compilers for the latest CUDA version on Linux can be seen `here <https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html>`_.
|
||||
|
||||
Some distros package a compatible ``gcc`` version with CUDA. If you run into compiler errors with ``nvcc``, try specifying the correct compiler with ``-DCMAKE_CXX_COMPILER=/path/to/correct/g++ -DCMAKE_C_COMPILER=/path/to/correct/gcc``. On Arch Linux, for example, both binaries can be found under ``/opt/cuda/bin/``.
|
||||
|
||||
@@ -259,7 +259,7 @@ There are several ways to build and install the package from source:
|
||||
|
||||
import sys
|
||||
import pathlib
|
||||
libpath = pathlib.Path(sys.prefix).joinpath("lib", "libxgboost.so")
|
||||
libpath = pathlib.Path(sys.base_prefix).joinpath("lib", "libxgboost.so")
|
||||
assert libpath.exists()
|
||||
|
||||
Then pass ``use_system_libxgboost=True`` option to ``pip install``:
|
||||
|
||||
@@ -33,6 +33,8 @@ DMatrix
|
||||
.. doxygengroup:: DMatrix
|
||||
:project: xgboost
|
||||
|
||||
.. _c_streaming:
|
||||
|
||||
Streaming
|
||||
---------
|
||||
|
||||
|
||||
11
doc/conf.py
@@ -19,7 +19,6 @@ import sys
|
||||
import tarfile
|
||||
import urllib.request
|
||||
import warnings
|
||||
from subprocess import call
|
||||
from urllib.error import HTTPError
|
||||
|
||||
from sh.contrib import git
|
||||
@@ -148,12 +147,20 @@ extensions = [
|
||||
|
||||
sphinx_gallery_conf = {
|
||||
# path to your example scripts
|
||||
"examples_dirs": ["../demo/guide-python", "../demo/dask", "../demo/aft_survival"],
|
||||
"examples_dirs": [
|
||||
"../demo/guide-python",
|
||||
"../demo/dask",
|
||||
"../demo/aft_survival",
|
||||
"../demo/gpu_acceleration",
|
||||
"../demo/rmm_plugin"
|
||||
],
|
||||
# path to where to save gallery generated output
|
||||
"gallery_dirs": [
|
||||
"python/examples",
|
||||
"python/dask-examples",
|
||||
"python/survival-examples",
|
||||
"python/gpu-examples",
|
||||
"python/rmm-examples",
|
||||
],
|
||||
"matplotlib_animations": True,
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ GitHub Actions is also used to build Python wheels targeting MacOS Intel and App
|
||||
``python_wheels`` pipeline sets up environment variables prefixed ``CIBW_*`` to indicate the target
|
||||
OS and processor. The pipeline then invokes the script ``build_python_wheels.sh``, which in turns
|
||||
calls ``cibuildwheel`` to build the wheel. The ``cibuildwheel`` is a library that sets up a
|
||||
suitable Python environment for each OS and processor target. Since we don't have Apple Silion
|
||||
suitable Python environment for each OS and processor target. Since we don't have Apple Silicon
|
||||
machine in GitHub Actions, cross-compilation is needed; ``cibuildwheel`` takes care of the complex
|
||||
task of cross-compiling a Python wheel. (Note that ``cibuildwheel`` will call
|
||||
``pip wheel``. Since XGBoost has a native library component, we created a customized build
|
||||
@@ -131,7 +131,7 @@ set up a credential pair in order to provision resources on AWS. See
|
||||
Worker Image Pipeline
|
||||
=====================
|
||||
Building images for worker machines used to be a chore: you'd provision an EC2 machine, SSH into it, and
|
||||
manually install the necessary packages. This process is not only laborous but also error-prone. You may
|
||||
manually install the necessary packages. This process is not only laborious but also error-prone. You may
|
||||
forget to install a package or change a system configuration.
|
||||
|
||||
No more. Now we have an automated pipeline for building images for worker machines.
|
||||
|
||||
@@ -16,8 +16,10 @@ C++ Coding Guideline
|
||||
* Each line of text may contain up to 100 characters.
|
||||
* The use of C++ exceptions is allowed.
|
||||
|
||||
- Use C++11 features such as smart pointers, braced initializers, lambda functions, and ``std::thread``.
|
||||
- Use C++17 features such as smart pointers, braced initializers, lambda functions, and ``std::thread``.
|
||||
- Use Doxygen to document all the interface code.
|
||||
- We have some comments around symbols imported by headers, some of those are hinted by `include-what-you-use <https://include-what-you-use.org>`_. It's not required.
|
||||
- We use clang-tidy and clang-format. You can check their configuration in the root directory of the XGBoost source tree.
|
||||
- We have a series of automatic checks to ensure that all of our codebase complies with the Google style. Before submitting your pull request, you are encouraged to run the style checks on your machine. See :ref:`running_checks_locally`.
|
||||
|
||||
***********************
|
||||
@@ -98,7 +100,7 @@ two automatic checks to enforce coding style conventions. To expedite the code r
|
||||
|
||||
Linter
|
||||
======
|
||||
We use `pylint <https://github.com/PyCQA/pylint>`_ and `cpplint <https://github.com/cpplint/cpplint>`_ to enforce style convention and find potential errors. Linting is especially useful for Python, as we can catch many errors that would have otherwise occured at run-time.
|
||||
We use `pylint <https://github.com/PyCQA/pylint>`_ and `cpplint <https://github.com/cpplint/cpplint>`_ to enforce style convention and find potential errors. Linting is especially useful for Python, as we can catch many errors that would have otherwise occurred at run-time.
|
||||
|
||||
To run this check locally, run the following command from the top level source tree:
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ General Development Process
|
||||
---------------------------
|
||||
Everyone in the community is welcomed to send patches, documents, and propose new directions to the project. The key guideline here is to enable everyone in the community to get involved and participate the decision and development. When major changes are proposed, an RFC should be sent to allow discussion by the community. We encourage public discussion, archivable channels such as issues and discuss forum, so that everyone in the community can participate and review the process later.
|
||||
|
||||
Code reviews are one of the key ways to ensure the quality of the code. High-quality code reviews prevent technical debt for long-term and are crucial to the success of the project. A pull request needs to be reviewed before it gets merged. A committer who has the expertise of the corresponding area would moderate the pull request and the merge the code when it is ready. The corresponding committer could request multiple reviewers who are familiar with the area of the code. We encourage contributors to request code reviews themselves and help review each other's code -- remember everyone is volunteering their time to the community, high-quality code review itself costs as much as the actual code contribution, you could get your code quickly reviewed if you do others the same favor.
|
||||
Code reviews are one of the key ways to ensure the quality of the code. High-quality code reviews prevent technical debt for long-term and are crucial to the success of the project. A pull request needs to be reviewed before it gets merged. A committer who has the expertise of the corresponding area would moderate the pull request and then merge the code when it is ready. The corresponding committer could request multiple reviewers who are familiar with the area of the code. We encourage contributors to request code reviews themselves and help review each other's code -- remember everyone is volunteering their time to the community, high-quality code review itself costs as much as the actual code contribution, you could get your code quickly reviewed if you do others the same favor.
|
||||
|
||||
The community should strive to reach a consensus on technical decisions through discussion. We expect committers and PMCs to moderate technical discussions in a diplomatic way, and provide suggestions with clear technical reasoning when necessary.
|
||||
|
||||
@@ -25,11 +25,11 @@ Committers are individuals who are granted the write access to the project. A co
|
||||
- Quality of contributions: High-quality, readable code contributions indicated by pull requests that can be merged without a substantial code review. History of creating clean, maintainable code and including good test cases. Informative code reviews to help other contributors that adhere to a good standard.
|
||||
- Community involvement: active participation in the discussion forum, promote the projects via tutorials, talks and outreach. We encourage committers to collaborate broadly, e.g. do code reviews and discuss designs with community members that they do not interact physically.
|
||||
|
||||
The Project Management Committee(PMC) consists group of active committers that moderate the discussion, manage the project release, and proposes new committer/PMC members. Potential candidates are usually proposed via an internal discussion among PMCs, followed by a consensus approval, i.e. least 3 +1 votes, and no vetoes. Any veto must be accompanied by reasoning. PMCs should serve the community by upholding the community practices and guidelines XGBoost a better community for everyone. PMCs should strive to only nominate new candidates outside of their own organization.
|
||||
The Project Management Committee (PMC) consists of a group of active committers that moderate the discussion, manage the project release, and propose new committer/PMC members. Potential candidates are usually proposed via an internal discussion among PMCs, followed by a consensus approval, i.e. at least 3 +1 votes, and no vetoes. Any veto must be accompanied by reasoning. PMCs should serve the community by upholding the community practices and guidelines in order to make XGBoost a better community for everyone. PMCs should strive to only nominate new candidates outside of their own organization.
|
||||
|
||||
The PMC is in charge of the project's `continuous integration (CI) <https://en.wikipedia.org/wiki/Continuous_integration>`_ and testing infrastructure. Currently, we host our own Jenkins server at https://xgboost-ci.net. The PMC shall appoint committer(s) to manage the CI infrastructure. The PMC may accept 3rd-party donations and sponsorships that would defray the cost of the CI infrastructure. See :ref:`donation_policy`.
|
||||
|
||||
|
||||
Reviewers
|
||||
---------
|
||||
Reviewers are individuals who actively contributed to the project and are willing to participate in the code review of new contributions. We identify reviewers from active contributors. The committers should explicitly solicit reviews from reviewers. High-quality code reviews prevent technical debt for long-term and are crucial to the success of the project. A pull request to the project has to be reviewed by at least one reviewer in order to be merged.
|
||||
Reviewers are individuals who actively contributed to the project and are willing to participate in the code review of new contributions. We identify reviewers from active contributors. The committers should explicitly solicit reviews from reviewers. High-quality code reviews prevent technical debt for the long-term and are crucial to the success of the project. A pull request to the project has to be reviewed by at least one reviewer in order to be merged.
|
||||
|
||||
@@ -8,23 +8,83 @@ Documentation and Examples
|
||||
:backlinks: none
|
||||
:local:
|
||||
|
||||
*********
|
||||
Documents
|
||||
*********
|
||||
*************
|
||||
Documentation
|
||||
*************
|
||||
* Python and C documentation is built using `Sphinx <http://www.sphinx-doc.org/en/master/>`_.
|
||||
* Each document is written in `reStructuredText <http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_.
|
||||
* You can build document locally to see the effect, by running
|
||||
* The documentation is the ``doc/`` directory.
|
||||
* You can build it locally using ``make html`` command.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
make html
|
||||
|
||||
inside the ``doc/`` directory. The online document is hosted by `Read the Docs <https://readthedocs.org/>`__ where the imported project is managed by `Hyunsu Cho <https://github.com/hcho3>`__ and `Jiaming Yuan <https://github.com/trivialfis>`__.
|
||||
Run ``make help`` to learn about the other commands.
|
||||
|
||||
The online document is hosted by `Read the Docs <https://readthedocs.org/>`__ where the imported project is managed by `Hyunsu Cho <https://github.com/hcho3>`__ and `Jiaming Yuan <https://github.com/trivialfis>`__.
|
||||
|
||||
=========================================
|
||||
Build the Python Docs using pip and Conda
|
||||
=========================================
|
||||
|
||||
#. Create a conda environment.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
conda create -n xgboost-docs --yes python=3.10
|
||||
|
||||
.. note:: Python 3.10 is required by `xgboost_ray <https://github.com/ray-project/xgboost_ray>`__ package.
|
||||
|
||||
#. Activate the environment
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
conda activate xgboost-docs
|
||||
|
||||
#. Install required packages (in the current environment) using ``pip`` command.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pip install -r requirements.txt
|
||||
|
||||
.. note::
|
||||
It is currently not possible to install the required packages using ``conda``
|
||||
due to ``xgboost_ray`` being unavailable in conda channels.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
conda install --file requirements.txt --yes -c conda-forge
|
||||
|
||||
|
||||
#. (optional) Install `graphviz <https://www.graphviz.org/>`__
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
conda install graphviz --yes
|
||||
|
||||
#. Finally, build the docs.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
make html
|
||||
|
||||
You should see the following messages in the console:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ make html
|
||||
sphinx-build -b html -d _build/doctrees . _build/html
|
||||
Running Sphinx v6.2.1
|
||||
...
|
||||
The HTML pages are in _build/html.
|
||||
|
||||
Build finished. The HTML pages are in _build/html.
|
||||
|
||||
********
|
||||
Examples
|
||||
********
|
||||
* Use cases and examples will be in `demo <https://github.com/dmlc/xgboost/tree/master/demo>`_.
|
||||
* Use cases and examples are in the `demo <https://github.com/dmlc/xgboost/tree/master/demo>`_ directory.
|
||||
* We are super excited to hear about your story. If you have blog posts,
|
||||
tutorials, or code solutions using XGBoost, please tell us, and we will add
|
||||
a link in the example pages.
|
||||
|
||||
@@ -29,7 +29,7 @@ The Project Management Committee (PMC) of the XGBoost project appointed `Open So
|
||||
|
||||
All expenses incurred for hosting CI will be submitted to the fiscal host with receipts. Only the expenses in the following categories will be approved for reimbursement:
|
||||
|
||||
* Cloud exprenses for the cloud test farm (https://buildkite.com/xgboost)
|
||||
* Cloud expenses for the cloud test farm (https://buildkite.com/xgboost)
|
||||
* Cost of domain https://xgboost-ci.net
|
||||
* Monthly cost of using BuildKite
|
||||
* Hosting cost of the User Forum (https://discuss.xgboost.ai)
|
||||
|
||||
@@ -169,7 +169,7 @@ supply a specified SANITIZER_PATH.
|
||||
|
||||
How to use sanitizers with CUDA support
|
||||
=======================================
|
||||
Runing XGBoost on CUDA with address sanitizer (asan) will raise memory error.
|
||||
Running XGBoost on CUDA with address sanitizer (asan) will raise memory error.
|
||||
To use asan with CUDA correctly, you need to configure asan via ASAN_OPTIONS
|
||||
environment variable:
|
||||
|
||||
|
||||
@@ -63,7 +63,7 @@ XGBoost supports missing values by default.
|
||||
In tree algorithms, branch directions for missing values are learned during training.
|
||||
Note that the gblinear booster treats missing values as zeros.
|
||||
|
||||
When the ``missing`` parameter is specifed, values in the input predictor that is equal to
|
||||
When the ``missing`` parameter is specified, values in the input predictor that are equal to
|
||||
``missing`` will be treated as missing and removed. By default it's set to ``NaN``.
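For illustration, a minimal sketch with a custom sentinel value (the value ``-999.0`` is arbitrary and only used here as an example):

.. code-block:: python

    import numpy as np
    import xgboost as xgb

    X = np.array([[1.0, -999.0], [2.0, 3.0]])
    y = np.array([0, 1])

    # Cells equal to -999.0 are treated as missing and removed from the input.
    dtrain = xgb.DMatrix(X, label=y, missing=-999.0)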
|
||||
|
||||
**************************************
|
||||
|
||||
@@ -14,53 +14,46 @@ Most of the algorithms in XGBoost including training, prediction and evaluation
|
||||
|
||||
Usage
|
||||
=====
|
||||
Specify the ``tree_method`` parameter as ``gpu_hist``. For details around the ``tree_method`` parameter, see :doc:`tree method </treemethod>`.
|
||||
|
||||
Supported parameters
|
||||
--------------------
|
||||
|
||||
GPU accelerated prediction is enabled by default for the above mentioned ``tree_method`` parameters but can be switched to CPU prediction by setting ``predictor`` to ``cpu_predictor``. This could be useful if you want to conserve GPU memory. Likewise when using CPU algorithms, GPU accelerated prediction can be enabled by setting ``predictor`` to ``gpu_predictor``.
|
||||
|
||||
The device ordinal (which GPU to use if you have many of them) can be selected using the
|
||||
``gpu_id`` parameter, which defaults to 0 (the first device reported by CUDA runtime).
|
||||
|
||||
To enable GPU acceleration, specify the ``device`` parameter as ``cuda``. In addition, the device ordinal (which GPU to use if you have multiple devices in the same node) can be specified using the ``cuda:<ordinal>`` syntax, where ``<ordinal>`` is an integer that represents the device ordinal. XGBoost defaults to 0 (the first device reported by CUDA runtime).
|
||||
|
||||
The GPU algorithms currently work with CLI, Python, R, and JVM packages. See :doc:`/install` for details.
|
||||
|
||||
.. code-block:: python
|
||||
:caption: Python example
|
||||
|
||||
param['gpu_id'] = 0
|
||||
param['tree_method'] = 'gpu_hist'
|
||||
params = dict()
|
||||
params["device"] = "cuda"
|
||||
params["tree_method"] = "hist"
|
||||
Xy = xgboost.QuantileDMatrix(X, y)
|
||||
xgboost.train(params, Xy)
|
||||
|
||||
.. code-block:: python
|
||||
:caption: With Scikit-Learn interface
|
||||
|
||||
XGBRegressor(tree_method='gpu_hist', gpu_id=0)
|
||||
:caption: With the Scikit-Learn interface
|
||||
|
||||
XGBRegressor(tree_method="hist", device="cuda")
|
||||
|
||||
GPU-Accelerated SHAP values
|
||||
=============================
|
||||
XGBoost makes use of `GPUTreeShap <https://github.com/rapidsai/gputreeshap>`_ as a backend for computing shap values when the GPU predictor is selected.
|
||||
XGBoost makes use of `GPUTreeShap <https://github.com/rapidsai/gputreeshap>`_ as a backend for computing shap values when the GPU is used.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
model.set_param({"predictor": "gpu_predictor"})
|
||||
shap_values = model.predict(dtrain, pred_contribs=True)
|
||||
booster.set_param({"device": "cuda:0"})
|
||||
shap_values = booster.predict(dtrain, pred_contribs=True)
|
||||
shap_interaction_values = model.predict(dtrain, pred_interactions=True)
|
||||
|
||||
See examples `here
|
||||
<https://github.com/dmlc/xgboost/tree/master/demo/gpu_acceleration>`__.
|
||||
See :ref:`sphx_glr_python_gpu-examples_tree_shap.py` for a worked example.
|
||||
|
||||
Multi-node Multi-GPU Training
|
||||
=============================
|
||||
|
||||
XGBoost supports fully distributed GPU training using `Dask <https://dask.org/>`_, ``Spark`` and ``PySpark``. For getting started with Dask see our tutorial :doc:`/tutorials/dask` and worked examples `here <https://github.com/dmlc/xgboost/tree/master/demo/dask>`__, also Python documentation :ref:`dask_api` for complete reference. For usage with ``Spark`` using Scala see :doc:`/jvm/xgboost4j_spark_gpu_tutorial`. Lastly for distributed GPU training with ``PySpark``, see :doc:`/tutorials/spark_estimator`.
|
||||
XGBoost supports fully distributed GPU training using `Dask <https://dask.org/>`_, ``Spark`` and ``PySpark``. For getting started with Dask see our tutorial :doc:`/tutorials/dask` and worked examples :doc:`/python/dask-examples/index`, also Python documentation :ref:`dask_api` for complete reference. For usage with ``Spark`` using Scala see :doc:`/jvm/xgboost4j_spark_gpu_tutorial`. Lastly for distributed GPU training with ``PySpark``, see :doc:`/tutorials/spark_estimator`.
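As a quick orientation, a minimal sketch of multi-GPU training on a single node with Dask follows; it assumes ``dask_cuda`` is installed, and the tutorial linked above covers the full workflow:

.. code-block:: python

    import dask.array as da
    import xgboost as xgb
    from dask.distributed import Client
    from dask_cuda import LocalCUDACluster

    if __name__ == "__main__":
        with LocalCUDACluster() as cluster, Client(cluster) as client:
            # Synthetic data, partitioned across the workers (one per GPU).
            X = da.random.random((100_000, 20), chunks=(10_000, 20))
            y = (da.random.random(100_000, chunks=(10_000,)) > 0.5).astype("int32")
            dtrain = xgb.dask.DaskDMatrix(client, X, label=y)
            output = xgb.dask.train(
                client,
                {"tree_method": "hist", "device": "cuda", "objective": "binary:logistic"},
                dtrain,
                num_boost_round=10,
            )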
|
||||
|
||||
|
||||
Memory usage
============
The following are some guidelines on the device memory usage of the `gpu_hist` tree method.
The following are some guidelines on the device memory usage of the ``hist`` tree method on GPU.

Memory inside xgboost training is generally allocated for two reasons - storing the dataset and working memory.

@@ -73,12 +66,13 @@ If you are getting out-of-memory errors on a big dataset, try the or :py:class:`

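For instance, a minimal sketch of trading a regular :py:class:`~xgboost.DMatrix` for a :py:class:`~xgboost.QuantileDMatrix`, which stores pre-binned data and usually lowers device memory usage with the ``hist`` method (synthetic data, purely for illustration; requires a CUDA device):

.. code-block:: python

    import numpy as np
    import xgboost as xgb

    rng = np.random.default_rng(0)
    X = rng.normal(size=(1000, 32))
    y = rng.normal(size=1000)

    # Pre-binned storage instead of the raw feature matrix.
    Xy = xgb.QuantileDMatrix(X, y)
    booster = xgb.train({"tree_method": "hist", "device": "cuda"}, Xy, num_boost_round=10)
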
CPU-GPU Interoperability
========================
XGBoost models trained on GPUs can be used on CPU-only systems to generate predictions. For information about how to save and load an XGBoost model, see :doc:`/tutorials/saving_model`.

The model can be used on any device regardless of the one used to train it. For instance, a model trained using GPU can still work on a CPU-only machine and vice versa. For more information about model serialization, see :doc:`/tutorials/saving_model`.

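A minimal sketch of that round trip (illustrative only; the file name is hypothetical and ``X``/``y`` are assumed to be in-memory data):

.. code-block:: python

    import xgboost as xgb

    # On a machine with a GPU.
    booster = xgb.train(
        {"tree_method": "hist", "device": "cuda"}, xgb.DMatrix(X, y), num_boost_round=10
    )
    booster.save_model("model.json")

    # Later, on a CPU-only machine.
    booster = xgb.Booster()
    booster.load_model("model.json")
    booster.set_param({"device": "cpu"})
    predt = booster.predict(xgb.DMatrix(X))
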
Developer notes
===============
The application may be profiled with annotations by specifying USE_NTVX to cmake. Regions covered by the 'Monitor' class in CUDA code will automatically appear in the nsight profiler when `verbosity` is set to 3.
The application may be profiled with annotations by specifying ``USE_NVTX`` to cmake. Regions covered by the 'Monitor' class in CUDA code will automatically appear in the nsight profiler when `verbosity` is set to 3.

**********
References

@@ -3,10 +3,10 @@ Installation Guide
##################

XGBoost provides binary packages for some language bindings. The binary packages support
the GPU algorithm (``gpu_hist``) on machines with NVIDIA GPUs. Please note that **training
with multiple GPUs is only supported for Linux platform**. See :doc:`gpu/index`. Also we
have both stable releases and nightly builds, see below for how to install them. For
building from source, visit :doc:`this page </build>`.
the GPU algorithm (``device=cuda:0``) on machines with NVIDIA GPUs. Please note that
**training with multiple GPUs is only supported for Linux platform**. See
:doc:`gpu/index`. Also we have both stable releases and nightly builds, see below for how
to install them. For building from source, visit :doc:`this page </build>`.

.. contents:: Contents

@@ -189,7 +189,7 @@ This will check out the latest stable version from the Maven Central.

For the latest release version number, please check `release page <https://github.com/dmlc/xgboost/releases>`_.

To enable the GPU algorithm (``tree_method='gpu_hist'``), use artifacts ``xgboost4j-gpu_2.12`` and ``xgboost4j-spark-gpu_2.12`` instead (note the ``gpu`` suffix).
To enable the GPU algorithm (``device='cuda'``), use artifacts ``xgboost4j-gpu_2.12`` and ``xgboost4j-spark-gpu_2.12`` instead (note the ``gpu`` suffix).

.. note:: Windows not supported in the JVM package
@@ -325,4 +325,4 @@ The SNAPSHOT JARs are hosted by the XGBoost project. Every commit in the ``maste

You can browse the file listing of the Maven repository at https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/list.html.

To enable the GPU algorithm (``tree_method='gpu_hist'``), use artifacts ``xgboost4j-gpu_2.12`` and ``xgboost4j-spark-gpu_2.12`` instead (note the ``gpu`` suffix).
To enable the GPU algorithm (``device='cuda'``), use artifacts ``xgboost4j-gpu_2.12`` and ``xgboost4j-spark-gpu_2.12`` instead (note the ``gpu`` suffix).

@@ -23,8 +23,8 @@ Installation
:local:
:backlinks: none

Checkout the :doc:`Installation Guide </install>` for how to install jvm package, or
:doc:`Building from Source </build>` on how to build it form source.
Checkout the :doc:`Installation Guide </install>` for how to install the jvm package, or
:doc:`Building from Source </build>` on how to build it from the sources.

********
Contents

@@ -129,7 +129,7 @@ With parameters and data, you are able to train a booster model.

booster.saveModel("model.bin");

* Generaing model dump with feature map
* Generating model dump with feature map

.. code-block:: java

@@ -121,7 +121,7 @@ To train a XGBoost model for classification, we need to claim a XGBoostClassifie
"objective" -> "multi:softprob",
"num_class" -> 3,
"num_round" -> 100,
"tree_method" -> "gpu_hist",
"device" -> "cuda",
"num_workers" -> 1)

val featuresNames = schema.fieldNames.filter(name => name != labelName)
@@ -130,15 +130,14 @@ To train a XGBoost model for classification, we need to claim a XGBoostClassifie
.setFeaturesCol(featuresNames)
.setLabelCol(labelName)

The available parameters for training a XGBoost model can be found in :doc:`here </parameter>`.
Similar to the XGBoost4J-Spark package, in addition to the default set of parameters,
XGBoost4J-Spark-GPU also supports the camel-case variant of these parameters to be
consistent with Spark's MLlib naming convention.
The ``device`` parameter is for informing XGBoost that CUDA devices should be used instead of CPU. Unlike the single-node mode, GPUs are managed by Spark instead of by XGBoost. Therefore, explicitly specified device ordinals like ``cuda:1`` are not supported.

The available parameters for training a XGBoost model can be found in :doc:`here </parameter>`. Similar to the XGBoost4J-Spark package, in addition to the default set of parameters, XGBoost4J-Spark-GPU also supports the camel-case variant of these parameters to be consistent with Spark's MLlib naming convention.

Specifically, each parameter in :doc:`this page </parameter>` has its equivalent form in
XGBoost4J-Spark-GPU with camel case. For example, to set ``max_depth`` for each tree, you can pass
parameter just like what we did in the above code snippet (as ``max_depth`` wrapped in a Map), or
you can do it through setters in XGBoostClassifer:
XGBoost4J-Spark-GPU with camel case. For example, to set ``max_depth`` for each tree, you
can pass parameter just like what we did in the above code snippet (as ``max_depth``
wrapped in a Map), or you can do it through setters in XGBoostClassifier:

.. code-block:: scala

@@ -34,6 +34,20 @@ General Parameters

- Which booster to use. Can be ``gbtree``, ``gblinear`` or ``dart``; ``gbtree`` and ``dart`` use tree based models while ``gblinear`` uses linear functions.

* ``device`` [default= ``cpu``]

.. versionadded:: 2.0.0

- Device for XGBoost to run. User can set it to one of the following values:

+ ``cpu``: Use CPU.
+ ``cuda``: Use a GPU (CUDA device).
+ ``cuda:<ordinal>``: ``<ordinal>`` is an integer that specifies the ordinal of the GPU (which GPU do you want to use if you have more than one device).
+ ``gpu``: Default GPU device selection from the list of available and supported devices. Only ``cuda`` devices are supported currently.
+ ``gpu:<ordinal>``: Default GPU device selection from the list of available and supported devices. Only ``cuda`` devices are supported currently.

For more information about GPU acceleration, see :doc:`/gpu/index`. In distributed environments, ordinal selection is handled by distributed frameworks instead of XGBoost. As a result, using ``cuda:<ordinal>`` will result in an error. Use ``cuda`` instead.

* ``verbosity`` [default=1]

- Verbosity of printing messages. Valid values are 0 (silent), 1 (warning), 2 (info), 3
@@ -44,7 +58,7 @@ General Parameters
* ``validate_parameters`` [default to ``false``, except for Python, R and CLI interface]

- When set to True, XGBoost will perform validation of input parameters to check whether
a parameter is used or not.
a parameter is used or not. A warning is emitted when there's an unknown parameter.

* ``nthread`` [default to maximum number of threads available if not set]

@@ -55,10 +69,6 @@ General Parameters

- Flag to disable default metric. Set to 1 or ``true`` to disable.

* ``num_feature`` [set automatically by XGBoost, no need to be set by user]

- Feature dimension used in boosting, set to maximum dimension of the feature

Parameters for Tree Booster
===========================
* ``eta`` [default=0.3, alias: ``learning_rate``]
@@ -99,7 +109,7 @@ Parameters for Tree Booster
- ``gradient_based``: the selection probability for each training instance is proportional to the
*regularized absolute value* of gradients (more specifically, :math:`\sqrt{g^2+\lambda h^2}`).
``subsample`` may be set to as low as 0.1 without loss of model accuracy. Note that this
sampling method is only supported when ``tree_method`` is set to ``gpu_hist``; other tree
sampling method is only supported when ``tree_method`` is set to ``hist`` and the device is ``cuda``; other tree
methods only support ``uniform`` sampling.

* ``colsample_bytree``, ``colsample_bylevel``, ``colsample_bynode`` [default=1]
@@ -131,26 +141,15 @@ Parameters for Tree Booster
* ``tree_method`` string [default= ``auto``]

- The tree construction algorithm used in XGBoost. See description in the `reference paper <http://arxiv.org/abs/1603.02754>`_ and :doc:`treemethod`.
- XGBoost supports ``approx``, ``hist`` and ``gpu_hist`` for distributed training. Experimental support for external memory is available for ``approx`` and ``gpu_hist``.

- Choices: ``auto``, ``exact``, ``approx``, ``hist``, ``gpu_hist``, this is a
combination of commonly used updaters. For other updaters like ``refresh``, set the
parameter ``updater`` directly.
- Choices: ``auto``, ``exact``, ``approx``, ``hist``, this is a combination of commonly
used updaters. For other updaters like ``refresh``, set the parameter ``updater``
directly.

- ``auto``: Use heuristic to choose the fastest method.

- For small dataset, exact greedy (``exact``) will be used.
- For larger dataset, approximate algorithm (``approx``) will be chosen. It's
recommended to try ``hist`` and ``gpu_hist`` for higher performance with large
dataset.
(``gpu_hist``)has support for ``external memory``.

- Because old behavior is always use exact greedy in single machine, user will get a
message when approximate algorithm is chosen to notify this choice.
- ``auto``: Same as the ``hist`` tree method.
- ``exact``: Exact greedy algorithm. Enumerates all split candidates.
- ``approx``: Approximate greedy algorithm using quantile sketch and gradient histogram.
- ``hist``: Faster histogram optimized approximate greedy algorithm.
- ``gpu_hist``: GPU implementation of ``hist`` algorithm.

* ``scale_pos_weight`` [default=1]

@@ -163,7 +162,8 @@ Parameters for Tree Booster
- ``grow_colmaker``: non-distributed column-based construction of trees.
- ``grow_histmaker``: distributed tree construction with row-based data splitting based on global proposal of histogram counting.
- ``grow_quantile_histmaker``: Grow tree using quantized histogram.
- ``grow_gpu_hist``: Grow tree with GPU.
- ``grow_gpu_hist``: Enabled when ``tree_method`` is set to ``hist`` along with ``device=cuda``.
- ``grow_gpu_approx``: Enabled when ``tree_method`` is set to ``approx`` along with ``device=cuda``.
- ``sync``: synchronizes trees in all distributed nodes.
- ``refresh``: refreshes tree's statistics and/or leaf values based on the current data. Note that no random subsampling of data rows is performed.
- ``prune``: prunes the splits where loss < min_split_loss (or gamma) and nodes that have depth greater than ``max_depth``.
@@ -183,7 +183,7 @@ Parameters for Tree Booster
* ``grow_policy`` [default= ``depthwise``]

- Controls a way new nodes are added to the tree.
- Currently supported only if ``tree_method`` is set to ``hist``, ``approx`` or ``gpu_hist``.
- Currently supported only if ``tree_method`` is set to ``hist`` or ``approx``.
- Choices: ``depthwise``, ``lossguide``

- ``depthwise``: split at nodes closest to the root.
@@ -195,22 +195,10 @@ Parameters for Tree Booster

* ``max_bin``, [default=256]

- Only used if ``tree_method`` is set to ``hist``, ``approx`` or ``gpu_hist``.
- Only used if ``tree_method`` is set to ``hist`` or ``approx``.
- Maximum number of discrete bins to bucket continuous features.
- Increasing this number improves the optimality of splits at the cost of higher computation time.

* ``predictor``, [default= ``auto``]

- The type of predictor algorithm to use. Provides the same results but allows the use of GPU or CPU.

- ``auto``: Configure predictor based on heuristics.
- ``cpu_predictor``: Multicore CPU prediction algorithm.
- ``gpu_predictor``: Prediction using GPU. Used when ``tree_method`` is ``gpu_hist``.
When ``predictor`` is set to default value ``auto``, the ``gpu_hist`` tree method is
able to provide GPU based prediction without copying training data to GPU memory.
If ``gpu_predictor`` is explicitly specified, then all data is copied into GPU, only
recommended for performing prediction tasks.

* ``num_parallel_tree``, [default=1]

- Number of parallel trees constructed during each iteration. This option is used to support boosted random forest.
@@ -238,6 +226,15 @@ Parameters for Tree Booster
- ``one_output_per_tree``: One model for each target.
- ``multi_output_tree``: Use multi-target trees.

* ``max_cached_hist_node``, [default = 65536]

Maximum number of cached nodes for CPU histogram.

.. versionadded:: 2.0.0

- For most of the cases this parameter should not be set except for growing deep trees
on CPU.

.. _cat-param:

Parameters for Categorical Feature
@@ -357,7 +354,7 @@ Specify the learning task and the corresponding learning objective. The objectiv

- ``reg:squarederror``: regression with squared loss.
- ``reg:squaredlogerror``: regression with squared log loss :math:`\frac{1}{2}[log(pred + 1) - log(label + 1)]^2`. All input labels are required to be greater than -1. Also, see metric ``rmsle`` for possible issue with this objective.
- ``reg:logistic``: logistic regression.
- ``reg:logistic``: logistic regression, output probability
- ``reg:pseudohubererror``: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
- ``reg:absoluteerror``: Regression with L1 error. When tree model is used, leaf value is refreshed after tree construction. If used in distributed training, the leaf value is calculated as the mean value from all workers, which is not guaranteed to be optimal.

@@ -35,14 +35,14 @@ After 1.4 release, we added a new parameter called ``strict_shape``, one can set
has equivalent output shape of ``multi:softprob`` due to dropped transformation. If
strict shape is set to False then output can have 1 or 2 dim depending on used model.

- When using ``preds_contribs`` with ``strict_shape`` set to ``True``:
- When using ``pred_contribs`` with ``strict_shape`` set to ``True``:

Output is a 3-dim array, with ``(rows, groups, columns + 1)`` as shape. Whether
``approx_contribs`` is used does not change the output shape. If the strict shape
parameter is not set, it can be a 2 or 3 dimension array depending on whether
multi-class model is being used.

- When using ``preds_interactions`` with ``strict_shape`` set to ``True``:
- When using ``pred_interactions`` with ``strict_shape`` set to ``True``:

Output is a 4-dim array, with ``(rows, groups, columns + 1, columns + 1)`` as shape.
Like the predict contribution case, whether ``approx_contribs`` is used does not change
@@ -54,7 +54,7 @@ After 1.4 release, we added a new parameter called ``strict_shape``, one can set
Output is a 4-dim array with ``(n_samples, n_iterations, n_classes, n_trees_in_forest)``
as shape. ``n_trees_in_forest`` is specified by the ``numb_parallel_tree`` during
training. When strict shape is set to False, output is a 2-dim array with last 3 dims
concatenated into 1. Also the last dimension is dropped if it eqauls to 1. When using
concatenated into 1. Also the last dimension is dropped if it equals to 1. When using
``apply`` method in scikit learn interface, this is set to False by default.

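As a small illustration of the strict-shape outputs described above (``booster`` and ``dtrain`` are assumed to be an existing trained model and :py:class:`~xgboost.DMatrix`):

.. code-block:: python

    # Feature contributions: (rows, groups, columns + 1) when strict_shape is used.
    contribs = booster.predict(dtrain, pred_contribs=True, strict_shape=True)

    # Leaf indices: (n_samples, n_iterations, n_classes, n_trees_in_forest).
    leaves = booster.predict(dtrain, pred_leaf=True, strict_shape=True)
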
@@ -68,7 +68,7 @@ n_classes, n_trees_in_forest)``, while R with ``strict_shape=TRUE`` outputs

Other than these prediction types, there's also a parameter called ``iteration_range``,
which is similar to model slicing. But instead of actually splitting up the model into
multiple stacks, it simply returns the prediction formed by the trees within range.
Number of trees created in each iteration eqauls to :math:`trees_i = num\_class \times
Number of trees created in each iteration equals to :math:`trees_i = num\_class \times
num\_parallel\_tree`. So if you are training a boosted random forest with size of 4, on
the 3-class classification dataset, and want to use the first 2 iterations of trees for
prediction, you need to provide ``iteration_range=(0, 2)``. Then the first :math:`2
@@ -87,15 +87,6 @@ with the native Python interface :py:meth:`xgboost.Booster.predict` and
behavior. Also the ``save_best`` parameter from :py:obj:`xgboost.callback.EarlyStopping`
might be useful.

*********
Predictor
*********

There are 2 predictors in XGBoost (3 if you have the one-api plugin enabled), namely
``cpu_predictor`` and ``gpu_predictor``. The default option is ``auto`` so that XGBoost
can employ some heuristics for saving GPU memory during training. They might have slight
different outputs due to floating point errors.

***********
Base Margin
@@ -134,15 +125,6 @@ it. Be aware that the output of in-place prediction depends on input data type,
input is on GPU data output is :py:obj:`cupy.ndarray`, otherwise a :py:obj:`numpy.ndarray`
is returned.

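For example, a short sketch of in-place prediction, where the output type follows the input type (``booster`` is assumed to be a trained model):

.. code-block:: python

    import numpy as np

    X = np.random.rand(16, booster.num_features())
    predt = booster.inplace_predict(X)  # returns a numpy.ndarray

    # If X were a cupy array residing on the GPU, the result would be a
    # cupy.ndarray instead, without copying the data back to the host first.
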
****************
Categorical Data
****************

Other than users performing encoding, XGBoost has experimental support for categorical
data using ``gpu_hist`` and ``gpu_predictor``. No special operation needs to be done on
input test data since the information about categories is encoded into the model during
training.

*************
Thread Safety
*************
@@ -159,7 +141,6 @@ instance we might accidentally call ``clf.set_params()`` inside a predict functi

def predict_fn(clf: xgb.XGBClassifier, X):
X = preprocess(X)
clf.set_params(predictor="gpu_predictor") # NOT safe!
clf.set_params(n_jobs=1) # NOT safe!
return clf.predict_proba(X, iteration_range=(0, 10))

2
doc/python/.gitignore
vendored
@@ -1,3 +1,5 @@
examples
dask-examples
survival-examples
gpu-examples
rmm-examples
@@ -17,3 +17,5 @@ Contents
examples/index
dask-examples/index
survival-examples/index
gpu-examples/index
rmm-examples/index

@@ -37,3 +37,7 @@ The sliced model is a copy of selected trees, that means the model itself is imm
during slicing. This feature is the basis of `save_best` option in early stopping
callback. See :ref:`sphx_glr_python_examples_individual_trees.py` for a worked example on
how to combine prediction with sliced trees.

.. note::

The returned model slice doesn't contain attributes like :py:class:`~xgboost.Booster.best_iteration` and :py:class:`~xgboost.Booster.best_score`.

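A minimal sketch of slicing (``booster`` is assumed to be trained for at least 7 rounds and ``dtrain`` to be an existing :py:class:`~xgboost.DMatrix`):

.. code-block:: python

    # Trees from iterations [3, 7); the slice is a copy, the original model is unchanged.
    sliced = booster[3:7]
    predt = sliced.predict(dtrain)

    # Roughly the same prediction can be obtained without copying by using
    # iteration_range on the full model.
    predt_range = booster.predict(dtrain, iteration_range=(3, 7))
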
@@ -23,12 +23,16 @@ Core Data Structure
:show-inheritance:

.. autoclass:: xgboost.QuantileDMatrix
:members:
:show-inheritance:

.. autoclass:: xgboost.Booster
:members:
:show-inheritance:

.. autoclass:: xgboost.DataIter
:members:
:show-inheritance:

Learning API
------------

@@ -310,8 +310,8 @@ for more info.

.. code-block:: python

# Use "gpu_hist" for training the model.
reg = xgb.XGBRegressor(tree_method="gpu_hist")
# Use "hist" for training the model.
reg = xgb.XGBRegressor(tree_method="hist", device="cuda")
# Fit the model using predictor X and response y.
reg.fit(X, y)
# Save model into JSON format.

@@ -20,7 +20,7 @@ sklearn estimator interface is still working in progress.

You can find some some quick start examples at
:ref:`sphx_glr_python_examples_sklearn_examples.py`. The main advantage of using sklearn
interface is that it works with most of the utilites provided by sklearn like
interface is that it works with most of the utilities provided by sklearn like
:py:func:`sklearn.model_selection.cross_validate`. Also, many other libraries recognize
the sklearn estimator interface thanks to its popularity.

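For instance, a short sketch of plugging the wrapper into :py:func:`sklearn.model_selection.cross_validate` (synthetic data, purely for illustration):

.. code-block:: python

    from sklearn.datasets import make_regression
    from sklearn.model_selection import cross_validate

    import xgboost as xgb

    X, y = make_regression(n_samples=256, n_features=16, random_state=0)
    reg = xgb.XGBRegressor(tree_method="hist", n_estimators=32)
    results = cross_validate(reg, X, y, cv=3, scoring="neg_root_mean_squared_error")
    print(results["test_score"])
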
@@ -3,14 +3,14 @@ Tree Methods
############

For training boosted tree models, there are 2 parameters used for choosing algorithms,
namely ``updater`` and ``tree_method``. XGBoost has 4 builtin tree methods, namely
``exact``, ``approx``, ``hist`` and ``gpu_hist``. Along with these tree methods, there
are also some free standing updaters including ``refresh``,
``prune`` and ``sync``. The parameter ``updater`` is more primitive than ``tree_method``
as the latter is just a pre-configuration of the former. The difference is mostly due to
historical reasons that each updater requires some specific configurations and might has
missing features. As we are moving forward, the gap between them is becoming more and
more irrelevant. We will collectively document them under tree methods.
namely ``updater`` and ``tree_method``. XGBoost has 3 builtin tree methods, namely
``exact``, ``approx`` and ``hist``. Along with these tree methods, there are also some
free standing updaters including ``refresh``, ``prune`` and ``sync``. The parameter
``updater`` is more primitive than ``tree_method`` as the latter is just a
pre-configuration of the former. The difference is mostly due to historical reasons that
each updater requires some specific configurations and might have missing features. As we
are moving forward, the gap between them is becoming more and more irrelevant. We will
collectively document them under tree methods.

**************
Exact Solution
@@ -19,23 +19,23 @@ Exact Solution
Exact means XGBoost considers all candidates from data for tree splitting, but underlying
the objective is still interpreted as a Taylor expansion.

1. ``exact``: Vanilla gradient boosting tree algorithm described in `reference paper
<http://arxiv.org/abs/1603.02754>`_. During each split finding procedure, it iterates
over all entries of input data. It's more accurate (among other greedy methods) but
slow in computation performance. Also it doesn't support distributed training as
XGBoost employs row spliting data distribution while ``exact`` tree method works on a
sorted column format. This tree method can be used with parameter ``tree_method`` set
to ``exact``.
1. ``exact``: The vanilla gradient boosting tree algorithm described in `reference paper
<http://arxiv.org/abs/1603.02754>`_. During split-finding, it iterates over all
entries of input data. It's more accurate (among other greedy methods) but
computationally slower compared to other tree methods. Furthermore, its feature
set is limited. Features like distributed training and external memory that require
approximated quantiles are not supported. This tree method can be used with the
parameter ``tree_method`` set to ``exact``.

**********************
Approximated Solutions
**********************

As ``exact`` tree method is slow in performance and not scalable, we often employ
approximated training algorithms. These algorithms build a gradient histogram for each
node and iterate through the histogram instead of real dataset. Here we introduce the
implementations in XGBoost below.
As the ``exact`` tree method is slow in computation performance and difficult to scale, we
often employ approximated training algorithms. These algorithms build a gradient
histogram for each node and iterate through the histogram instead of the real dataset. Here
we introduce the implementations in XGBoost.

1. ``approx`` tree method: An approximation tree method described in `reference paper
<http://arxiv.org/abs/1603.02754>`_. It runs sketching before building each tree
@@ -48,22 +48,18 @@ implementations in XGBoost below.
this global sketch. This is the fastest algorithm as it runs sketching only once. The
algorithm can be accessed by setting ``tree_method`` to ``hist``.

3. ``gpu_hist`` tree method: The ``gpu_hist`` tree method is a GPU implementation of
``hist``, with additional support for gradient based sampling. The algorithm can be
accessed by setting ``tree_method`` to ``gpu_hist``.

************
Implications
************

Some objectives like ``reg:squarederror`` have constant hessian. In this case, ``hist``
or ``gpu_hist`` should be preferred as weighted sketching doesn't make sense with constant
Some objectives like ``reg:squarederror`` have constant hessian. In this case,
``hist`` should be preferred as weighted sketching doesn't make sense with constant
weights. When using non-constant hessian objectives, sometimes ``approx`` yields better
accuracy, but with slower computation performance. Most of the time using ``(gpu)_hist``
with higher ``max_bin`` can achieve similar or even superior accuracy while maintaining
good performance. However, as xgboost is largely driven by community effort, the actual
implementations have some differences than pure math description. Result might have
slight differences than expectation, which we are currently trying to overcome.
accuracy, but with slower computation performance. Most of the time using ``hist`` with
higher ``max_bin`` can achieve similar or even superior accuracy while maintaining good
performance. However, as xgboost is largely driven by community effort, the actual
implementations have some differences from the pure math description. Results might be
slightly different than expected, which we are currently trying to overcome.

**************
Other Updaters
@@ -72,7 +68,7 @@ Other Updaters
1. ``Prune``: It prunes the existing trees. ``prune`` is usually used as part of other
tree methods. To use pruner independently, one needs to set the process type to update
by: ``{"process_type": "update", "updater": "prune"}``. With this set of parameters,
during trianing, XGBOost will prune the existing trees according to 2 parameters
during training, XGBoost will prune the existing trees according to 2 parameters
``min_split_loss (gamma)`` and ``max_depth``.

2. ``Refresh``: Refresh the statistic of built trees on a new training dataset. Like the
@@ -106,8 +102,8 @@ solely for the interest of documentation.
histogram creation step and uses sketching values directly during split evaluation. It
was never tested and contained some unknown bugs, we decided to remove it and focus our
resources on more promising algorithms instead. For accuracy, most of the time
``approx``, ``hist`` and ``gpu_hist`` are enough with some parameters tuning, so
removing them don't have any real practical impact.
``approx`` and ``hist`` are enough with some parameter tuning, so removing them doesn't
have any real practical impact.

3. ``grow_local_histmaker`` updater: An approximation tree method described in `reference
paper <http://arxiv.org/abs/1603.02754>`_. This updater was rarely used in practice so
@@ -127,23 +123,23 @@ Feature Matrix
Following table summarizes some differences in supported features between 4 tree methods,
`T` means supported while `F` means unsupported.

+------------------+-----------+---------------------+---------------------+------------------------+
| | Exact | Approx | Hist | GPU Hist |
+==================+===========+=====================+=====================+========================+
| grow_policy | Depthwise | depthwise/lossguide | depthwise/lossguide | depthwise/lossguide |
+------------------+-----------+---------------------+---------------------+------------------------+
| max_leaves | F | T | T | T |
+------------------+-----------+---------------------+---------------------+------------------------+
| sampling method | uniform | uniform | uniform | gradient_based/uniform |
+------------------+-----------+---------------------+---------------------+------------------------+
| categorical data | F | T | T | T |
+------------------+-----------+---------------------+---------------------+------------------------+
| External memory | F | T | T | P |
+------------------+-----------+---------------------+---------------------+------------------------+
| Distributed | F | T | T | T |
+------------------+-----------+---------------------+---------------------+------------------------+
+------------------+-----------+---------------------+------------------------+---------------------+------------------------+
| | Exact | Approx | Approx (GPU) | Hist | Hist (GPU) |
+==================+===========+=====================+========================+=====================+========================+
| grow_policy | Depthwise | depthwise/lossguide | depthwise/lossguide | depthwise/lossguide | depthwise/lossguide |
+------------------+-----------+---------------------+------------------------+---------------------+------------------------+
| max_leaves | F | T | T | T | T |
+------------------+-----------+---------------------+------------------------+---------------------+------------------------+
| sampling method | uniform | uniform | gradient_based/uniform | uniform | gradient_based/uniform |
+------------------+-----------+---------------------+------------------------+---------------------+------------------------+
| categorical data | F | T | T | T | T |
+------------------+-----------+---------------------+------------------------+---------------------+------------------------+
| External memory | F | T | P | T | P |
+------------------+-----------+---------------------+------------------------+---------------------+------------------------+
| Distributed | F | T | T | T | T |
+------------------+-----------+---------------------+------------------------+---------------------+------------------------+

Features/parameters that are not mentioned here are universally supported for all 4 tree
Features/parameters that are not mentioned here are universally supported for all 3 tree
methods (for instance, column sampling and constraints). The `P` in external memory means
partially supported. Please note that both categorical data and external memory are
special handling. Please note that both categorical data and external memory are
experimental.

@@ -55,7 +55,7 @@ To ensure that CMake can locate the XGBoost library, supply ``-DCMAKE_PREFIX_PAT

.. code-block:: bash

# Nagivate to the build directory for your application
# Navigate to the build directory for your application
cd build
# Activate the Conda environment where we previously installed XGBoost
conda activate [env_name]
@@ -65,7 +65,7 @@ To ensure that CMake can locate the XGBoost library, supply ``-DCMAKE_PREFIX_PAT
make

************************
Usefull Tips To Remember
Useful Tips To Remember
************************

Below are some useful tips while using C API:
@@ -151,7 +151,7 @@ c. Assertion technique: It works both in C/ C++. If expression evaluates to 0 (f
Example if we our training data is in ``dense matrix`` format then your prediction dataset should also be a ``dense matrix`` or if training in ``libsvm`` format then dataset for prediction should also be in ``libsvm`` format.

4. Always use strings for setting values to the parameters in booster handle object. The paramter value can be of any data type (e.g. int, char, float, double, etc), but they should always be encoded as strings.
4. Always use strings for setting values to the parameters in booster handle object. The parameter value can be of any data type (e.g. int, char, float, double, etc), but they should always be encoded as strings.

.. code-block:: c

@@ -168,7 +168,7 @@ Sample examples along with Code snippet to use C API functions
.. code-block:: c

DMatrixHandle data; // handle to DMatrix
// Load the dat from file & store it in data variable of DMatrixHandle datatype
// Load the data from file & store it in data variable of DMatrixHandle datatype
safe_xgboost(XGDMatrixCreateFromFile("/path/to/file/filename", silent, &data));

@@ -278,7 +278,7 @@ Sample examples along with Code snippet to use C API functions
uint64_t const* out_shape;
/* Dimension of output prediction */
uint64_t out_dim;
/* Pointer to a thread local contigious array, assigned in prediction function. */
/* Pointer to a thread local contiguous array, assigned in prediction function. */
float const* out_result = NULL;
safe_xgboost(
XGBoosterPredictFromDMatrix(booster, dmatrix, config, &out_shape, &out_dim, &out_result));

@@ -4,16 +4,17 @@ Categorical Data

.. note::

As of XGBoost 1.6, the feature is experimental and has limited features
As of XGBoost 1.6, the feature is experimental and has limited features. Only the
Python package is fully supported.

Starting from version 1.5, XGBoost has experimental support for categorical data available
for public testing. For numerical data, the split condition is defined as :math:`value <
threshold`, while for categorical data the split is defined depending on whether
partitioning or onehot encoding is used. For partition-based splits, the splits are
specified as :math:`value \in categories`, where ``categories`` is the set of categories
in one feature. If onehot encoding is used instead, then the split is defined as
:math:`value == category`. More advanced categorical split strategy is planned for future
releases and this tutorial details how to inform XGBoost about the data type.
Starting from version 1.5, the XGBoost Python package has experimental support for
categorical data available for public testing. For numerical data, the split condition is
defined as :math:`value < threshold`, while for categorical data the split is defined
depending on whether partitioning or onehot encoding is used. For partition-based splits,
the splits are specified as :math:`value \in categories`, where ``categories`` is the set
of categories in one feature. If onehot encoding is used instead, then the split is
defined as :math:`value == category`. More advanced categorical split strategy is planned
for future releases and this tutorial details how to inform XGBoost about the data type.

************************************
Training with scikit-learn Interface
@@ -35,8 +36,8 @@ parameter ``enable_categorical``:

.. code:: python

# Supported tree methods are `gpu_hist`, `approx`, and `hist`.
clf = xgb.XGBClassifier(tree_method="gpu_hist", enable_categorical=True)
# Supported tree methods are `approx` and `hist`.
clf = xgb.XGBClassifier(tree_method="hist", enable_categorical=True, device="cuda")
# X is the dataframe we created in previous snippet
clf.fit(X, y)
# Must use JSON/UBJSON for serialization, otherwise the information is lost.

@@ -38,7 +38,7 @@ Although XGBoost has native support for said functions, using it for demonstrati
provides us the opportunity of comparing the result from our own implementation and the
one from XGBoost internal for learning purposes. After finishing this tutorial, we should
be able to provide our own functions for rapid experiments. And at the end, we will
provide some notes on non-identy link function along with examples of using custom metric
provide some notes on non-identity link function along with examples of using custom metric
and objective with the `scikit-learn` interface.

If we compute the gradient of said objective function:
@@ -165,7 +165,7 @@ Reverse Link Function
When using builtin objective, the raw prediction is transformed according to the objective
function. When a custom objective is provided XGBoost doesn't know its link function so the
user is responsible for making the transformation for both objective and custom evaluation
metric. For objective with identiy link like ``squared error`` this is trivial, but for
metric. For objective with identity link like ``squared error`` this is trivial, but for
other link functions like log link or inverse link the difference is significant.

For the Python package, the behaviour of prediction can be controlled by the
@@ -173,7 +173,7 @@ For the Python package, the behaviour of prediction can be controlled by the
parameter without a custom objective, the metric function will receive transformed
prediction since the objective is defined by XGBoost. However, when the custom objective is
also provided along with that metric, then both the objective and custom metric will
recieve raw prediction. The following example provides a comparison between two different
receive raw prediction. The following example provides a comparison between two different
behavior with a multi-class classification model. Firstly we define 2 different Python
metric functions implementing the same underlying metric for comparison,
`merror_with_transform` is used when custom objective is also used, otherwise the simpler

@@ -54,6 +54,8 @@ on a dask cluster:
y = da.random.random(size=(num_obs, 1), chunks=(1000, 1))

dtrain = xgb.dask.DaskDMatrix(client, X, y)
# or
# dtrain = xgb.dask.DaskQuantileDMatrix(client, X, y)

output = xgb.dask.train(
client,
@@ -145,8 +147,8 @@ Also for inplace prediction:

.. code-block:: python

booster.set_param({'predictor': 'gpu_predictor'})
# where X is a dask DataFrame or dask Array containing cupy or cuDF backed data.
# where X is a dask DataFrame or dask Array backed by cupy or cuDF.
booster.set_param({"device": "cuda"})
prediction = xgb.dask.inplace_predict(client, booster, X)

When input is ``da.Array`` object, output is always ``da.Array``. However, if the input
@@ -222,6 +224,12 @@ collection.
main(client)

****************
GPU acceleration
****************

For most of the use cases with GPUs, the `Dask-CUDA <https://docs.rapids.ai/api/dask-cuda/stable/quickstart.html>`__ project should be used to create the cluster, which automatically configures the correct device ordinal for worker processes. As a result, users should NOT specify the ordinal (good: ``device=cuda``, bad: ``device=cuda:1``). See :ref:`sphx_glr_python_dask-examples_gpu_training.py` and :ref:`sphx_glr_python_dask-examples_sklearn_gpu_training.py` for worked examples.

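A rough sketch of such a setup (assuming the ``dask_cuda`` package is installed and ``X``/``y`` are dask collections; note that no device ordinal is specified):

.. code-block:: python

    from dask.distributed import Client
    from dask_cuda import LocalCUDACluster

    import xgboost as xgb

    # Dask-CUDA assigns one worker per GPU and sets the ordinal for us.
    with LocalCUDACluster() as cluster, Client(cluster) as client:
        reg = xgb.dask.DaskXGBRegressor(tree_method="hist", device="cuda")
        reg.client = client
        reg.fit(X, y)
        predt = reg.predict(X)
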
***************************
Working with other clusters
***************************
@@ -248,7 +256,7 @@ In the example below, a ``KubeCluster`` is used for `deploying Dask on Kubernete
m = 1000
n = 10
kWorkers = 2 # assuming you have 2 GPU nodes on that cluster.
# You need to work out the worker-spec youself. See document in dask_kubernetes for
# You need to work out the worker-spec yourself. See document in dask_kubernetes for
# its usage. Here we just want to show that XGBoost works on various clusters.
cluster = KubeCluster.from_yaml('worker-spec.yaml', deploy_mode='remote')
cluster.scale(kWorkers) # scale to use all GPUs
@@ -259,7 +267,7 @@ In the example below, a ``KubeCluster`` is used for `deploying Dask on Kubernete

regressor = xgb.dask.DaskXGBRegressor(n_estimators=10, missing=0.0)
regressor.client = client
regressor.set_params(tree_method='gpu_hist')
regressor.set_params(tree_method='hist', device="cuda")
regressor.fit(X, y, eval_set=[(X, y)])

@@ -640,7 +648,7 @@ environment than training the model using a single node due to aforementioned cr
Memory Usage
************

Here are some pratices on reducing memory usage with dask and xgboost.
Here are some practices on reducing memory usage with dask and xgboost.

- In a distributed work flow, data is best loaded by dask collections directly instead of
loaded by client process. When loading with client process is unavoidable, use

Some files were not shown because too many files have changed in this diff.