[R] Improve more docstrings (#9919)

Michael Mayer 2023-12-26 10:30:13 +01:00 committed by GitHub
parent 6a5f6ba694
commit 52620fdb34
17 changed files with 931 additions and 692 deletions

View File

@@ -127,22 +127,20 @@ xgb.ggplot.shap.summary <- function(data, shap_contrib = NULL, features = NULL,
  p
}

#' Combine feature values and SHAP values
#'
#' Internal function used to combine and melt feature values and SHAP contributions
#' as required for ggplot functions related to SHAP.
#'
#' @param data_list The result of `xgb.shap.data()`.
#' @param normalize Whether to standardize feature values to mean 0 and
#'   standard deviation 1. This is useful for comparing multiple features on the same
#'   plot. Default is `FALSE`.
#'
#' @return A `data.table` containing the observation ID, the feature name, the
#'   feature value (normalized if specified), and the SHAP contribution value.
#' @noRd
prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
  data <- data_list[["data"]]
  shap_contrib <- data_list[["shap_contrib"]]
@@ -163,15 +161,16 @@ prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
  p_data
}

#' Scale feature values
#'
#' Internal function that scales feature values to mean 0 and standard deviation 1.
#' Useful for comparing multiple features on the same plot.
#'
#' @param x Numeric vector.
#'
#' @return Numeric vector with mean 0 and standard deviation 1.
#' @noRd
normalize <- function(x) {
  loc <- mean(x, na.rm = TRUE)
  scale <- stats::sd(x, na.rm = TRUE)
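
For reference, a minimal sketch of what normalize() computes, using the same
mean()/sd() logic as the body above (illustrative only):

x <- c(1, 2, 3, 4, 10)
z <- (x - mean(x, na.rm = TRUE)) / stats::sd(x, na.rm = TRUE)
round(c(mean(z), stats::sd(z)), 10)  # 0 and 1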

View File

@@ -1,83 +1,115 @@
#' Feature importance
#'
#' Creates a `data.table` of feature importances.
#'
#' @param feature_names Character vector used to overwrite the feature names
#'   of the model. The default is `NULL` (use original feature names).
#' @param model Object of class `xgb.Booster`.
#' @param trees An integer vector of tree indices that should be included
#'   into the importance calculation (only for the "gbtree" booster).
#'   The default (`NULL`) parses all trees.
#'   It could be useful, e.g., in multiclass classification to get feature importances
#'   for each class separately. *Important*: the tree index in XGBoost models
#'   is zero-based (e.g., use `trees = 0:4` for the first five trees).
#' @param data Deprecated.
#' @param label Deprecated.
#' @param target Deprecated.
#'
#' @details
#'
#' This function works for both linear and tree models.
#'
#' For linear models, the importance is the absolute magnitude of linear coefficients.
#' To obtain a meaningful ranking by importance for linear models, the features need to
#' be on the same scale (which is also recommended when using L1 or L2 regularization).
#'
#' @return A `data.table` with the following columns:
#'
#' For a tree model:
#' - `Features`: Names of the features used in the model.
#' - `Gain`: Fractional contribution of each feature to the model based on
#'   the total gain of this feature's splits. Higher percentage means higher importance.
#' - `Cover`: Metric of the number of observations related to this feature.
#' - `Frequency`: Percentage of times a feature has been used in trees.
#'
#' For a linear model:
#' - `Features`: Names of the features used in the model.
#' - `Weight`: Linear coefficient of this feature.
#' - `Class`: Class label (only for multiclass models).
#'
#' If `feature_names` is not provided and `model` doesn't have `feature_names`,
#' the index of the features will be used instead. Because the index is extracted from the model dump
#' (based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R).
#'
#' @examples
#'
#' # binomial classification using "gbtree":
#' data(agaricus.train, package = "xgboost")
#'
#' bst <- xgboost(
#'   data = agaricus.train$data,
#'   label = agaricus.train$label,
#'   max_depth = 2,
#'   eta = 1,
#'   nthread = 2,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' xgb.importance(model = bst)
#'
#' # binomial classification using "gblinear":
#' bst <- xgboost(
#'   data = agaricus.train$data,
#'   label = agaricus.train$label,
#'   booster = "gblinear",
#'   eta = 0.3,
#'   nthread = 1,
#'   nrounds = 20,
#'   objective = "binary:logistic"
#' )
#'
#' xgb.importance(model = bst)
#'
#' # multiclass classification using "gbtree":
#' nclass <- 3
#' nrounds <- 10
#' mbst <- xgboost(
#'   data = as.matrix(iris[, -5]),
#'   label = as.numeric(iris$Species) - 1,
#'   max_depth = 3,
#'   eta = 0.2,
#'   nthread = 2,
#'   nrounds = nrounds,
#'   objective = "multi:softprob",
#'   num_class = nclass
#' )
#'
#' # all classes clumped together:
#' xgb.importance(model = mbst)
#'
#' # inspect importances separately for each class:
#' xgb.importance(
#'   model = mbst, trees = seq(from = 0, by = nclass, length.out = nrounds)
#' )
#' xgb.importance(
#'   model = mbst, trees = seq(from = 1, by = nclass, length.out = nrounds)
#' )
#' xgb.importance(
#'   model = mbst, trees = seq(from = 2, by = nclass, length.out = nrounds)
#' )
#'
#' # multiclass classification using "gblinear":
#' mbst <- xgboost(
#'   data = scale(as.matrix(iris[, -5])),
#'   label = as.numeric(iris$Species) - 1,
#'   booster = "gblinear",
#'   eta = 0.2,
#'   nthread = 1,
#'   nrounds = 15,
#'   objective = "multi:softprob",
#'   num_class = nclass
#' )
#'
#' xgb.importance(model = mbst)
#'
#' @export
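
A quick sanity check of the documented columns (a sketch, assuming the
tree-based `bst` from the first example above): for "gbtree" models, `Gain`,
`Cover`, and `Frequency` are normalized fractions, so each column sums to 1.

imp <- xgb.importance(model = bst)
colSums(imp[, .(Gain, Cover, Frequency)])  # each approximately 1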

View File

@@ -1,57 +1,58 @@
#' Parse model text dump
#'
#' Parse a boosted tree model text dump into a `data.table` structure.
#'
#' @param feature_names Character vector used to overwrite the feature names
#'   of the model. The default (`NULL`) uses the original feature names.
#' @param model Object of class `xgb.Booster`.
#' @param text Character vector previously generated by the function [xgb.dump()]
#'   (called with parameter `with_stats = TRUE`). `text` takes precedence over `model`.
#' @param trees An integer vector of tree indices that should be used.
#'   The default (`NULL`) uses all trees.
#'   Useful, e.g., in multiclass classification to get only
#'   the trees of one class. *Important*: the tree index in XGBoost models
#'   is zero-based (e.g., use `trees = 0:4` for the first five trees).
#' @param use_int_id A logical flag indicating whether nodes in columns "Yes", "No", and
#'   "Missing" should be represented as integers (when `TRUE`) or as "Tree-Node"
#'   character strings (when `FALSE`, default).
#' @param ... Currently not used.
#'
#' @return
#' A `data.table` with detailed information about tree nodes. It has the following columns:
#' - `Tree`: integer ID of a tree in a model (zero-based index).
#' - `Node`: integer ID of a node in a tree (zero-based index).
#' - `ID`: character identifier of a node in a model (only when `use_int_id = FALSE`).
#' - `Feature`: for a branch node, a feature ID or name (when available);
#'   for a leaf node, it simply labels it as `"Leaf"`.
#' - `Split`: location of the split for a branch node (split condition is always "less than").
#' - `Yes`: ID of the next node when the split condition is met.
#' - `No`: ID of the next node when the split condition is not met.
#' - `Missing`: ID of the next node when the branch value is missing.
#' - `Quality`: either the split gain (change in loss) or the leaf value.
#' - `Cover`: metric related to the number of observations either seen by a split
#'   or collected by a leaf during training.
#'
#' When `use_int_id = FALSE`, columns "Yes", "No", and "Missing" point to model-wide node identifiers
#' in the "ID" column. When `use_int_id = TRUE`, those columns point to node identifiers from
#' the corresponding trees in the "Node" column.
#'
#' @examples
#' # Basic use:
#'
#' data(agaricus.train, package = "xgboost")
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#'
#' bst <- xgboost(
#'   data = agaricus.train$data,
#'   label = agaricus.train$label,
#'   max_depth = 2,
#'   eta = 1,
#'   nthread = nthread,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' (dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst))
#'

@@ -60,8 +61,12 @@
#' (dt <- xgb.model.dt.tree(model = bst))
#'
#' # How to match feature names of splits that are following a current 'Yes' branch:
#' merge(
#'   dt,
#'   dt[, .(ID, Y.Feature = Feature)], by.x = "Yes", by.y = "ID", all.x = TRUE
#' )[
#'   order(Tree, Node)
#' ]
#'
#' @export
xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
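
The documented columns make ad-hoc summaries straightforward; for instance, a
small sketch counting leaves per tree with the `dt` from the examples above:

dt[Feature == "Leaf", .N, by = Tree]  # number of leaves in each tree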

View File

@@ -1,65 +1,74 @@
#' Plot model tree depth
#'
#' Visualizes distributions related to the depth of tree leaves.
#' - `xgb.plot.deepness()` uses base R graphics.
#' - `xgb.ggplot.deepness()` uses "ggplot2".
#'
#' @param model Either an `xgb.Booster` model, or the "data.table" returned by [xgb.model.dt.tree()].
#' @param which Which distribution to plot (see details).
#' @param plot Should the plot be shown? Default is `TRUE`.
#' @param ... Other parameters passed to [graphics::barplot()] or [graphics::plot()].
#'
#' @details
#'
#' When `which = "2x1"`, two distributions with respect to the leaf depth
#' are plotted on top of each other:
#' 1. The distribution of the number of leaves in a tree model at a certain depth.
#' 2. The distribution of the average weighted number of observations ("cover")
#'    ending up in leaves at a certain depth.
#'
#' Those could be helpful in determining sensible ranges of the `max_depth`
#' and `min_child_weight` parameters.
#'
#' When `which = "max.depth"` or `which = "med.depth"`, plots of either maximum or
#' median depth per tree with respect to the tree number are created.
#'
#' Finally, `which = "med.weight"` shows how
#' a tree's median absolute leaf weight changes through the iterations.
#'
#' These functions have been inspired by the blog post
#' <https://github.com/aysent/random-forest-leaf-visualization>.
#'
#' @return
#' The return value of the two functions is as follows:
#' - `xgb.plot.deepness()`: A "data.table" (invisibly).
#'   Each row corresponds to a terminal leaf in the model, with information
#'   about its depth, cover, and weight (used in calculating predictions).
#'   If `plot = TRUE`, a plot is shown as well.
#' - `xgb.ggplot.deepness()`: When `which = "2x1"`, a list of two "ggplot" objects,
#'   and a single "ggplot" object otherwise.
#'
#' @seealso [xgb.train()] and [xgb.model.dt.tree()].
#'
#' @examples
#'
#' data(agaricus.train, package = "xgboost")
#' ## Keep the number of threads to 2 for examples
#' nthread <- 2
#' data.table::setDTthreads(nthread)
#'
#' ## Change max_depth to a higher number to get a more significant result
#' bst <- xgboost(
#'   data = agaricus.train$data,
#'   label = agaricus.train$label,
#'   max_depth = 6,
#'   nthread = nthread,
#'   nrounds = 50,
#'   objective = "binary:logistic",
#'   subsample = 0.5,
#'   min_child_weight = 2
#' )
#'
#' xgb.plot.deepness(bst)
#' xgb.ggplot.deepness(bst)
#'
#' xgb.plot.deepness(
#'   bst, which = "max.depth", pch = 16, col = rgb(0, 0, 1, 0.3), cex = 2
#' )
#'
#' xgb.plot.deepness(
#'   bst, which = "med.weight", pch = 16, col = rgb(0, 0, 1, 0.3), cex = 2
#' )
#'
#' @rdname xgb.plot.deepness
#' @export
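
To work with the per-leaf table directly, a short sketch continuing the example
above (the columns hold the depth, cover, and weight information described in
the return section):

dd <- xgb.plot.deepness(bst, plot = FALSE)
head(dd)  # one row per terminal leaf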

View File

@@ -1,64 +1,75 @@
#' Plot feature importance
#'
#' Represents previously calculated feature importance as a bar graph.
#' - `xgb.plot.importance()` uses base R graphics.
#' - `xgb.ggplot.importance()` uses "ggplot2".
#'
#' @param importance_matrix A `data.table` as returned by [xgb.importance()].
#' @param top_n Maximal number of top features to include into the plot.
#' @param measure The name of importance measure to plot.
#'   When `NULL`, 'Gain' is used for trees and 'Weight' for gblinear.
#' @param rel_to_first Whether importance values should be represented as relative to
#'   the highest ranked feature, see Details.
#' @param left_margin Adjust the left margin size to fit feature names.
#'   When `NULL`, the existing `par("mar")` is used.
#' @param cex Passed as `cex.names` parameter to [graphics::barplot()].
#' @param plot Should the barplot be shown? Default is `TRUE`.
#' @param n_clusters A numeric vector containing the min and the max range
#'   of the possible number of clusters of bars.
#' @param ... Other parameters passed to [graphics::barplot()]
#'   (except `horiz`, `border`, `cex.names`, `names.arg`, and `las`).
#'   Only used in `xgb.plot.importance()`.
#'
#' @details
#' The graph represents each feature as a horizontal bar of length proportional to the importance of a feature.
#' Features are sorted by decreasing importance.
#' It works for both "gblinear" and "gbtree" models.
#'
#' When `rel_to_first = FALSE`, the values are plotted as in `importance_matrix`.
#' For a "gbtree" model, that would mean being normalized to the total of 1
#' ("what is feature's importance contribution relative to the whole model?").
#' For linear models, `rel_to_first = FALSE` would show actual values of the coefficients.
#' Setting `rel_to_first = TRUE` shows the picture from the perspective of
#' "what is feature's importance contribution relative to the most important feature?"
#'
#' The "ggplot" backend performs 1-D clustering of the importance values,
#' with bar colors corresponding to different clusters having similar importance values.
#'
#' @return
#' The return value depends on the function:
#' - `xgb.plot.importance()`: Invisibly, a "data.table" with `top_n` features sorted
#'   by importance. If `plot = TRUE`, the values are also plotted as barplot.
#' - `xgb.ggplot.importance()`: A customizable "ggplot" object.
#'   E.g., to change the title, set `+ ggtitle("A GRAPH NAME")`.
#'
#' @seealso [graphics::barplot()]
#'
#' @examples
#' data(agaricus.train)
#'
#' ## Keep the number of threads to 2 for examples
#' nthread <- 2
#' data.table::setDTthreads(nthread)
#'
#' bst <- xgboost(
#'   data = agaricus.train$data,
#'   label = agaricus.train$label,
#'   max_depth = 3,
#'   eta = 1,
#'   nthread = nthread,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst)
#' xgb.plot.importance(
#'   importance_matrix, rel_to_first = TRUE, xlab = "Relative importance"
#' )
#'
#' gg <- xgb.ggplot.importance(
#'   importance_matrix, measure = "Frequency", rel_to_first = TRUE
#' )
#' gg
#' gg + ggplot2::ylab("Frequency")
#'
#' @rdname xgb.plot.importance
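
For intuition, the `rel_to_first = TRUE` rescaling is conceptually equivalent
to dividing the plotted measure by its maximum (a sketch, not the internal
implementation):

importance_matrix[, Gain / max(Gain)]  # values relative to the top feature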

View File

@@ -1,14 +1,10 @@
#' Project all trees on one tree
#'
#' Visualization of the ensemble of trees as a single collective unit.
#'
#' @inheritParams xgb.plot.tree
#' @param features_keep Number of features to keep in each position of the multi trees,
#'   by default 5.
#'
#' @details
#'

@@ -24,33 +20,31 @@
#' Moreover, the trees tend to reuse the same features.
#'
#' The function projects each tree onto one, and keeps for each position the
#' `features_keep` first features (based on the Gain per feature measure).
#'
#' This function is inspired by this blog post:
#' <https://wellecks.wordpress.com/2015/02/21/peering-into-the-black-box-visualizing-lambdamart/>
#'
#' @inherit xgb.plot.tree return
#'
#' @examples
#'
#' data(agaricus.train, package = "xgboost")
#'
#' ## Keep the number of threads to 2 for examples
#' nthread <- 2
#' data.table::setDTthreads(nthread)
#'
#' bst <- xgboost(
#'   data = agaricus.train$data,
#'   label = agaricus.train$label,
#'   max_depth = 15,
#'   eta = 1,
#'   nthread = nthread,
#'   nrounds = 30,
#'   objective = "binary:logistic",
#'   min_child_weight = 50,
#'   verbose = 0
#' )
#'
#' p <- xgb.plot.multi.trees(model = bst, features_keep = 3)

@@ -58,10 +52,13 @@
#'
#' \dontrun{
#' # Below is an example of how to save this plot to a file.
#' # Note that for export_graph() to work, the {DiagrammeRsvg} and {rsvg} packages
#' # must also be installed.
#'
#' library(DiagrammeR)
#'
#' gr <- xgb.plot.multi.trees(model = bst, features_keep = 3, render = FALSE)
#' export_graph(gr, "tree.pdf", width = 1500, height = 600)
#' }
#'
#' @export
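
Because `render` is inherited from xgb.plot.tree(), the unrendered graph can be
obtained and rendered explicitly later; a minimal sketch:

gr <- xgb.plot.multi.trees(model = bst, features_keep = 3, render = FALSE)
DiagrammeR::render_graph(gr)  # render the dgr_graph object when ready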

View File

@@ -1,110 +1,165 @@
#' SHAP dependence plots
#'
#' Visualizes SHAP values against feature values to gain an impression of feature effects.
#'
#' @param data The data to explain as a `matrix` or `dgCMatrix`.
#' @param shap_contrib Matrix of SHAP contributions of `data`.
#'   The default (`NULL`) computes it from `model` and `data`.
#' @param features Vector of column indices or feature names to plot.
#'   When `NULL` (default), the `top_n` most important features are selected
#'   by [xgb.importance()].
#' @param top_n How many of the most important features (<= 100) should be selected?
#'   By default 1 for SHAP dependence and 10 for SHAP summary.
#'   Only used when `features = NULL`.
#' @param model An `xgb.Booster` model. Only required when `shap_contrib = NULL` or
#'   `features = NULL`.
#' @param trees Passed to [xgb.importance()] when `features = NULL`.
#' @param target_class Only relevant for multiclass models. The default (`NULL`)
#'   averages the SHAP values over all classes. Pass a (0-based) class index
#'   to show only SHAP values of that class.
#' @param approxcontrib Passed to `predict()` when `shap_contrib = NULL`.
#' @param subsample Fraction of data points randomly picked for plotting.
#'   The default (`NULL`) will use up to 100k data points.
#' @param n_col Number of columns in a grid of plots.
#' @param col Color of the scatterplot markers.
#' @param pch Scatterplot marker.
#' @param discrete_n_uniq Maximal number of unique feature values to consider the
#'   feature as discrete.
#' @param discrete_jitter Jitter amount added to the values of discrete features.
#' @param ylab The y-axis label in 1D plots.
#' @param plot_NA Should contributions of cases with missing values be plotted?
#'   Default is `TRUE`.
#' @param col_NA Color of marker for missing value contributions.
#' @param pch_NA Marker type for `NA` values.
#' @param pos_NA Relative position of the x-location where `NA` values are shown:
#'   `min(x) + (max(x) - min(x)) * pos_NA`.
#' @param plot_loess Should loess-smoothed curves be plotted? (Default is `TRUE`.)
#'   The smoothing is only done for features with more than 5 distinct values.
#' @param col_loess Color of loess curves.
#' @param span_loess The `span` parameter of [stats::loess()].
#' @param which Whether to do univariate or bivariate plotting. Currently, only "1d" is implemented.
#' @param plot Should the plot be drawn? (Default is `TRUE`.)
#'   If `FALSE`, only a list of matrices is returned.
#' @param ... Other parameters passed to [graphics::plot()].
#'
#' @details
#'
#' These scatterplots represent how SHAP feature contributions depend on feature values.
#' The similarity to partial dependence plots is that they also give an idea for how feature values
#' affect predictions. However, in partial dependence plots, we see marginal dependencies
#' of model prediction on feature value, while SHAP dependence plots display the estimated
#' contributions of a feature to the prediction for each individual case.
#'
#' When `plot_loess = TRUE`, feature values are rounded to three significant digits and
#' weighted LOESS is computed and plotted, where the weights are the numbers of data points
#' at each rounded value.
#'
#' Note: SHAP contributions are on the scale of the model margin.
#' E.g., for a logistic binomial objective, the margin is on log-odds scale.
#' Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP
#' contributions for all features + bias), depending on the objective used, transforming SHAP
#' contributions for a feature from the marginal to the prediction space is not necessarily
#' a meaningful thing to do.
#'
#' @return
#' In addition to producing plots (when `plot = TRUE`), it silently returns a list of two matrices:
#' - `data`: Feature value matrix.
#' - `shap_contrib`: Corresponding SHAP value matrix.
#'
#' @references
#' 1. Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions",
#'    NIPS Proceedings 2017, <https://arxiv.org/abs/1705.07874>
#' 2. Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles",
#'    <https://arxiv.org/abs/1706.06060>
#'
#' @examples
#'
#' data(agaricus.train, package = "xgboost")
#' data(agaricus.test, package = "xgboost")
#'
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#' nrounds <- 20
#'
#' bst <- xgboost(
#'   agaricus.train$data,
#'   agaricus.train$label,
#'   nrounds = nrounds,
#'   eta = 0.1,
#'   max_depth = 3,
#'   subsample = 0.5,
#'   objective = "binary:logistic",
#'   nthread = nthread,
#'   verbose = 0
#' )
#'
#' xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none")
#'
#' contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
#' xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3)
#'
#' # Summary plot
#' xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12)
#'
#' # Multiclass example - plots for each class separately:
#' nclass <- 3
#' x <- as.matrix(iris[, -5])
#' set.seed(123)
#' is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values
#'
#' mbst <- xgboost(
#'   data = x,
#'   label = as.numeric(iris$Species) - 1,
#'   nrounds = nrounds,
#'   max_depth = 2,
#'   eta = 0.3,
#'   subsample = 0.5,
#'   nthread = nthread,
#'   objective = "multi:softprob",
#'   num_class = nclass,
#'   verbose = 0
#' )
#' trees0 <- seq(from = 0, by = nclass, length.out = nrounds)
#' col <- rgb(0, 0, 1, 0.5)
#'
#' xgb.plot.shap(
#'   x,
#'   model = mbst,
#'   trees = trees0,
#'   target_class = 0,
#'   top_n = 4,
#'   n_col = 2,
#'   col = col,
#'   pch = 16,
#'   pch_NA = 17
#' )
#'
#' xgb.plot.shap(
#'   x,
#'   model = mbst,
#'   trees = trees0 + 1,
#'   target_class = 1,
#'   top_n = 4,
#'   n_col = 2,
#'   col = col,
#'   pch = 16,
#'   pch_NA = 17
#' )
#'
#' xgb.plot.shap(
#'   x,
#'   model = mbst,
#'   trees = trees0 + 2,
#'   target_class = 2,
#'   top_n = 4,
#'   n_col = 2,
#'   col = col,
#'   pch = 16,
#'   pch_NA = 17
#' )
#'
#' # Summary plot
#' xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4)
#'
#' @rdname xgb.plot.shap
#' @export

@@ -187,41 +242,48 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
  invisible(list(data = data, shap_contrib = shap_contrib))
}
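
A sketch of the additivity property noted in the details section: the
predcontrib output includes a bias column, so the row sums reproduce the
margin predictions (using the binary `bst` from the examples above):

contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
margin <- predict(bst, agaricus.test$data, outputmargin = TRUE)
all.equal(unname(rowSums(contr)), margin)  # TRUE up to numerical tolerance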
#' SHAP summary plot
#'
#' Visualizes SHAP contributions of different features.
#'
#' A point plot (each point representing one observation from `data`) is
#' produced for each feature, with the points plotted on the SHAP value axis.
#' Each point (observation) is coloured based on its feature value.
#'
#' The plot shows which features have a negative / positive contribution
#' to the model prediction, and whether the contribution is different for larger
#' or smaller values of the feature. Inspired by the summary plot of
#' <https://github.com/shap/shap>.
#'
#' @inheritParams xgb.plot.shap
#'
#' @return A `ggplot2` object.
#' @export
#'
#' @examples
#' # See examples in xgb.plot.shap()
#'
#' @seealso [xgb.plot.shap()], [xgb.ggplot.shap.summary()],
#'   and the Python library <https://github.com/shap/shap>.
xgb.plot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
                                  trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
  # Only ggplot implementation is available.
  xgb.ggplot.shap.summary(data, shap_contrib, features, top_n, model, trees, target_class, approxcontrib, subsample)
}

#' Prepare data for SHAP plots
#'
#' Internal function used in [xgb.plot.shap()], [xgb.plot.shap.summary()], etc.
#'
#' @inheritParams xgb.plot.shap
#' @param max_observations Maximum number of observations to consider.
#' @keywords internal
#' @noRd
#'
#' @return
#' A list containing:
#' - `data`: The matrix of feature values.
#' - `shap_contrib`: The matrix with corresponding SHAP values.
xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1, model = NULL,
                          trees = NULL, target_class = NULL, approxcontrib = FALSE,
                          subsample = NULL, max_observations = 100000) {

View File

@@ -1,69 +1,78 @@
#' Plot boosted trees
#'
#' Read a tree model text dump and plot the model.
#'
#' @param feature_names Character vector used to overwrite the feature names
#'   of the model. The default (`NULL`) uses the original feature names.
#' @param model Object of class `xgb.Booster`.
#' @param trees An integer vector of tree indices that should be used.
#'   The default (`NULL`) uses all trees.
#'   Useful, e.g., in multiclass classification to get only
#'   the trees of one class. *Important*: the tree index in XGBoost models
#'   is zero-based (e.g., use `trees = 0:2` for the first three trees).
#' @param plot_width,plot_height Width and height of the graph in pixels.
#'   The values are passed to [DiagrammeR::render_graph()].
#' @param render Should the graph be rendered or not? The default is `TRUE`.
#' @param show_node_id A logical flag for whether to show node IDs in the graph.
#' @param ... Currently not used.
#'
#' @details
#'
#' The content of each node is visualized like this:
#' - *Feature name*.
#' - *Cover:* The sum of second order gradients of training data.
#'   For the squared loss, this simply corresponds to the number of instances in the node.
#'   The deeper in the tree, the lower the value.
#' - *Gain* (for split nodes): Information gain metric of a split
#'   (corresponds to the importance of the node in the model).
#' - *Value* (for leaves): Margin value that the leaf may contribute to the prediction.
#'
#' The tree root nodes also indicate the tree index (0-based).
#'
#' The "Yes" branches are marked by the "< split_value" label.
#' The branches also used for missing values are marked as bold
#' (as in "carrying extra capacity").
#'
#' This function uses [GraphViz](https://www.graphviz.org/) as DiagrammeR backend.
#'
#' @return
#' The value depends on the `render` parameter:
#' - If `render = TRUE` (default): Rendered graph object which is an htmlwidget of
#'   class `grViz`. Similar to "ggplot" objects, it needs to be printed when not
#'   running from the command line.
#' - If `render = FALSE`: Graph object which is of DiagrammeR's class `dgr_graph`.
#'   This could be useful if one wants to modify some of the graph attributes
#'   before rendering the graph with [DiagrammeR::render_graph()].
#'
#' @examples
#' data(agaricus.train, package = "xgboost")
#'
#' bst <- xgboost(
#'   data = agaricus.train$data,
#'   label = agaricus.train$label,
#'   max_depth = 3,
#'   eta = 1,
#'   nthread = 2,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' # plot all the trees
#' xgb.plot.tree(model = bst)
#'
#' # plot only the first tree and display the node ID:
#' xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE)
#'
#' \dontrun{
#' # Below is an example of how to save this plot to a file.
#' # Note that for export_graph() to work, the {DiagrammeRsvg}
#' # and {rsvg} packages must also be installed.
#'
#' library(DiagrammeR)
#'
#' gr <- xgb.plot.tree(model = bst, trees = 0:1, render = FALSE)
#' export_graph(gr, "tree.pdf", width = 1500, height = 1900)
#' export_graph(gr, "tree.png", width = 1500, height = 1900)
#' }
#'
#' @export
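
As the return section notes, the unrendered dgr_graph can be inspected or
tweaked before rendering; a sketch assuming DiagrammeR's accessor functions:

gr <- xgb.plot.tree(model = bst, trees = 0, render = FALSE)
DiagrammeR::get_node_df(gr)  # node attributes (labels, fills) as a data frame
DiagrammeR::render_graph(gr)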

View File

@ -2,7 +2,7 @@
% Please edit documentation in R/xgb.importance.R % Please edit documentation in R/xgb.importance.R
\name{xgb.importance} \name{xgb.importance}
\alias{xgb.importance} \alias{xgb.importance}
\title{Importance of features in a model.} \title{Feature importance}
\usage{ \usage{
xgb.importance( xgb.importance(
feature_names = NULL, feature_names = NULL,
@ -14,88 +14,126 @@ xgb.importance(
) )
} }
\arguments{ \arguments{
\item{feature_names}{character vector of feature names. If the model already \item{feature_names}{Character vector used to overwrite the feature names
contains feature names, those would be used when \code{feature_names=NULL} (default value). of the model. The default is \code{NULL} (use original feature names).}
Non-null \code{feature_names} could be provided to override those in the model.}
\item{model}{object of class \code{xgb.Booster}.} \item{model}{Object of class \code{xgb.Booster}.}
\item{trees}{(only for the gbtree booster) an integer vector of tree indices that should be included \item{trees}{An integer vector of tree indices that should be included
into the importance calculation. If set to \code{NULL}, all trees of the model are parsed. into the importance calculation (only for the "gbtree" booster).
The default (\code{NULL}) parses all trees.
It could be useful, e.g., in multiclass classification to get feature importances It could be useful, e.g., in multiclass classification to get feature importances
for each class separately. IMPORTANT: the tree index in xgboost models for each class separately. \emph{Important}: the tree index in XGBoost models
is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).} is zero-based (e.g., use \code{trees = 0:4} for the first five trees).}
\item{data}{deprecated.} \item{data}{Deprecated.}
\item{label}{deprecated.} \item{label}{Deprecated.}
\item{target}{deprecated.} \item{target}{Deprecated.}
} }
\value{ \value{
For a tree model, a \code{data.table} with the following columns: A \code{data.table} with the following columns:
For a tree model:
\itemize{ \itemize{
\item \code{Features} names of the features used in the model; \item \code{Features}: Names of the features used in the model.
\item \code{Gain} represents fractional contribution of each feature to the model based on \item \code{Gain}: Fractional contribution of each feature to the model based on
the total gain of this feature's splits. Higher percentage means a more important the total gain of this feature's splits. Higher percentage means higher importance.
predictive feature. \item \code{Cover}: Metric of the number of observations related to this feature.
\item \code{Cover} metric of the number of observation related to this feature; \item \code{Frequency}: Percentage of times a feature has been used in trees.
\item \code{Frequency} percentage representing the relative number of times
a feature have been used in trees.
} }
A linear model's importance \code{data.table} has the following columns: For a linear model:
\itemize{ \itemize{
\item \code{Features} names of the features used in the model; \item \code{Features}: Names of the features used in the model.
\item \code{Weight} the linear coefficient of this feature; \item \code{Weight}: Linear coefficient of this feature.
\item \code{Class} (only for multiclass models) class label. \item \code{Class}: Class label (only for multiclass models).
} }
If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names}, If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names},
index of the features will be used instead. Because the index is extracted from the model dump the index of the features will be used instead. Because the index is extracted from the model dump
(based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R). (based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R).
} }
\description{ \description{
Creates a \code{data.table} of feature importances in a model. Creates a \code{data.table} of feature importances.
} }
\details{ \details{
This function works for both linear and tree models. This function works for both linear and tree models.
For linear models, the importance is the absolute magnitude of linear coefficients. For linear models, the importance is the absolute magnitude of linear coefficients.
For that reason, in order to obtain a meaningful ranking by importance for a linear model, To obtain a meaningful ranking by importance for linear models, the features need to
the features need to be on the same scale (which you also would want to do when using either be on the same scale (which is also recommended when using L1 or L2 regularization).
L1 or L2 regularization).
} }
\examples{ \examples{
# binomial classification using gbtree: # binomial classification using "gbtree":
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") bst <- xgboost(
data = agaricus.train$data,
label = agaricus.train$label,
max_depth = 2,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
xgb.importance(model = bst) xgb.importance(model = bst)
# binomial classification using gblinear: # binomial classification using "gblinear":
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, booster = "gblinear", bst <- xgboost(
eta = 0.3, nthread = 1, nrounds = 20, objective = "binary:logistic") data = agaricus.train$data,
label = agaricus.train$label,
booster = "gblinear",
eta = 0.3,
nthread = 1,
nrounds = 20,
objective = "binary:logistic"
)
xgb.importance(model = bst) xgb.importance(model = bst)
# multiclass classification using gbtree: # multiclass classification using "gbtree":
nclass <- 3 nclass <- 3
nrounds <- 10 nrounds <- 10
mbst <- xgboost(data = as.matrix(iris[, -5]), label = as.numeric(iris$Species) - 1, mbst <- xgboost(
max_depth = 3, eta = 0.2, nthread = 2, nrounds = nrounds, data = as.matrix(iris[, -5]),
objective = "multi:softprob", num_class = nclass) label = as.numeric(iris$Species) - 1,
max_depth = 3,
eta = 0.2,
nthread = 2,
nrounds = nrounds,
objective = "multi:softprob",
num_class = nclass
)
# all classes clumped together: # all classes clumped together:
xgb.importance(model = mbst) xgb.importance(model = mbst)
# inspect importances separately for each class:
xgb.importance(model = mbst, trees = seq(from=0, by=nclass, length.out=nrounds))
xgb.importance(model = mbst, trees = seq(from=1, by=nclass, length.out=nrounds))
xgb.importance(model = mbst, trees = seq(from=2, by=nclass, length.out=nrounds))
# multiclass classification using gblinear: # inspect importances separately for each class:
mbst <- xgboost(data = scale(as.matrix(iris[, -5])), label = as.numeric(iris$Species) - 1, xgb.importance(
booster = "gblinear", eta = 0.2, nthread = 1, nrounds = 15, model = mbst, trees = seq(from = 0, by = nclass, length.out = nrounds)
objective = "multi:softprob", num_class = nclass) )
xgb.importance(
model = mbst, trees = seq(from = 1, by = nclass, length.out = nrounds)
)
xgb.importance(
model = mbst, trees = seq(from = 2, by = nclass, length.out = nrounds)
)
# multiclass classification using "gblinear":
mbst <- xgboost(
data = scale(as.matrix(iris[, -5])),
label = as.numeric(iris$Species) - 1,
booster = "gblinear",
eta = 0.2,
nthread = 1,
nrounds = 15,
objective = "multi:softprob",
num_class = nclass
)
xgb.importance(model = mbst) xgb.importance(model = mbst)
} }
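Because the returned importance table of a tree model is a data.table sorted by decreasing Gain, it can be filtered directly. A minimal sketch, assuming the tree-model bst from the examples above (the column name Gain follows the Value section):

imp <- xgb.importance(model = bst)
# keep the features that jointly account for 90% of the total gain
imp[cumsum(Gain) <= 0.9]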

man/xgb.model.dt.tree.Rd View File

@@ -2,7 +2,7 @@
% Please edit documentation in R/xgb.model.dt.tree.R % Please edit documentation in R/xgb.model.dt.tree.R
\name{xgb.model.dt.tree} \name{xgb.model.dt.tree}
\alias{xgb.model.dt.tree} \alias{xgb.model.dt.tree}
\title{Parse a boosted tree model text dump} \title{Parse model text dump}
\usage{ \usage{
xgb.model.dt.tree( xgb.model.dt.tree(
feature_names = NULL, feature_names = NULL,
@@ -14,44 +14,40 @@ xgb.model.dt.tree(
) )
} }
\arguments{ \arguments{
\item{feature_names}{character vector of feature names. If the model already \item{feature_names}{Character vector used to overwrite the feature names
contains feature names, those would be used when \code{feature_names=NULL} (default value). of the model. The default (\code{NULL}) uses the original feature names.}
Non-null \code{feature_names} could be provided to override those in the model.}
\item{model}{object of class \code{xgb.Booster}} \item{model}{Object of class \code{xgb.Booster}.}
\item{text}{\code{character} vector previously generated by the \code{xgb.dump} \item{text}{Character vector previously generated by the function \code{\link[=xgb.dump]{xgb.dump()}}
function (where parameter \code{with_stats = TRUE} should have been set). (called with parameter \code{with_stats = TRUE}). \code{text} takes precedence over \code{model}.}
\code{text} takes precedence over \code{model}.}
\item{trees}{an integer vector of tree indices that should be parsed. \item{trees}{An integer vector of tree indices that should be used.
If set to \code{NULL}, all trees of the model are parsed. The default (\code{NULL}) uses all trees.
It could be useful, e.g., in multiclass classification to get only Useful, e.g., in multiclass classification to get only
the trees of one certain class. IMPORTANT: the tree index in xgboost models the trees of one class. \emph{Important}: the tree index in XGBoost models
is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).} is zero-based (e.g., use \code{trees = 0:4} for the first five trees).}
\item{use_int_id}{a logical flag indicating whether nodes in columns "Yes", "No", "Missing" should be \item{use_int_id}{A logical flag indicating whether nodes in columns "Yes", "No", and
represented as integers (when FALSE) or as "Tree-Node" character strings (when FALSE).} "Missing" should be represented as integers (when \code{TRUE}) or as "Tree-Node"
character strings (when \code{FALSE}, default).}
\item{...}{currently not used.} \item{...}{Currently not used.}
} }
\value{ \value{
A \code{data.table} with detailed information about model trees' nodes. A \code{data.table} with detailed information about tree nodes. It has the following columns:
The columns of the \code{data.table} are:
\itemize{ \itemize{
\item \code{Tree}: integer ID of a tree in a model (zero-based index) \item \code{Tree}: integer ID of a tree in a model (zero-based index).
\item \code{Node}: integer ID of a node in a tree (zero-based index) \item \code{Node}: integer ID of a node in a tree (zero-based index).
\item \code{ID}: character identifier of a node in a model (only when \code{use_int_id=FALSE}) \item \code{ID}: character identifier of a node in a model (only when \code{use_int_id = FALSE}).
\item \code{Feature}: for a branch node, it's a feature id or name (when available); \item \code{Feature}: for a branch node, a feature ID or name (when available);
for a leaf note, it simply labels it as \code{'Leaf'} for a leaf node, it simply labels it as \code{"Leaf"}.
\item \code{Split}: location of the split for a branch node (split condition is always "less than") \item \code{Split}: location of the split for a branch node (split condition is always "less than").
\item \code{Yes}: ID of the next node when the split condition is met \item \code{Yes}: ID of the next node when the split condition is met.
\item \code{No}: ID of the next node when the split condition is not met \item \code{No}: ID of the next node when the split condition is not met.
\item \code{Missing}: ID of the next node when branch value is missing \item \code{Missing}: ID of the next node when the branch value is missing.
\item \code{Quality}: either the split gain (change in loss) or the leaf value \item \code{Quality}: either the split gain (change in loss) or the leaf value.
\item \code{Cover}: metric related to the number of observation either seen by a split \item \code{Cover}: metric related to the number of observations either seen by a split
or collected by a leaf during training. or collected by a leaf during training.
} }
@@ -65,13 +61,20 @@ Parse a boosted tree model text dump into a \code{data.table} structure.
\examples{ \examples{
# Basic use: # Basic use:
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
## Keep the number of threads to 1 for examples ## Keep the number of threads to 1 for examples
nthread <- 1 nthread <- 1
data.table::setDTthreads(nthread) data.table::setDTthreads(nthread)
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, bst <- xgboost(
eta = 1, nthread = nthread, nrounds = 2,objective = "binary:logistic") data = agaricus.train$data,
label = agaricus.train$label,
max_depth = 2,
eta = 1,
nthread = nthread,
nrounds = 2,
objective = "binary:logistic"
)
(dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst)) (dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst))
@@ -80,7 +83,11 @@ bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_dep
(dt <- xgb.model.dt.tree(model = bst)) (dt <- xgb.model.dt.tree(model = bst))
# How to match feature names of splits that are following a current 'Yes' branch: # How to match feature names of splits that are following a current 'Yes' branch:
merge(
merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)] dt,
dt[, .(ID, Y.Feature = Feature)], by.x = "Yes", by.y = "ID", all.x = TRUE
)[
order(Tree, Node)
]
} }
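The node table also allows recomputing importance-like statistics by hand. A sketch using dt from the example above (per the Value section, Quality holds the split gain of branch nodes and leaf rows carry the Feature label "Leaf"):

# total split gain per feature, an unnormalized analogue of xgb.importance()
dt[Feature != "Leaf", .(TotalGain = sum(Quality)), by = Feature][order(-TotalGain)]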

man/xgb.ggplot.deepness.Rd View File

@@ -3,7 +3,7 @@
\name{xgb.ggplot.deepness} \name{xgb.ggplot.deepness}
\alias{xgb.ggplot.deepness} \alias{xgb.ggplot.deepness}
\alias{xgb.plot.deepness} \alias{xgb.plot.deepness}
\title{Plot model trees deepness} \title{Plot model tree depth}
\usage{ \usage{
xgb.ggplot.deepness( xgb.ggplot.deepness(
model = NULL, model = NULL,
@@ -18,66 +18,84 @@ xgb.plot.deepness(
) )
} }
\arguments{ \arguments{
\item{model}{either an \code{xgb.Booster} model generated by the \code{xgb.train} function \item{model}{Either an \code{xgb.Booster} model, or the "data.table" returned by \code{\link[=xgb.model.dt.tree]{xgb.model.dt.tree()}}.}
or a data.table result of the \code{xgb.model.dt.tree} function.}
\item{which}{which distribution to plot (see details).} \item{which}{Which distribution to plot (see details).}
\item{plot}{(base R barplot) whether a barplot should be produced. \item{plot}{Should the plot be shown? Default is \code{TRUE}.}
If FALSE, only a data.table is returned.}
\item{...}{other parameters passed to \code{barplot} or \code{plot}.} \item{...}{Other parameters passed to \code{\link[graphics:barplot]{graphics::barplot()}} or \code{\link[graphics:plot.default]{graphics::plot()}}.}
} }
\value{ \value{
Other than producing plots (when \code{plot=TRUE}), the \code{xgb.plot.deepness} function The return value of the two functions is as follows:
silently returns a processed data.table where each row corresponds to a terminal leaf in a tree model, \itemize{
and contains information about leaf's depth, cover, and weight (which is used in calculating predictions). \item \code{xgb.plot.deepness()}: A "data.table" (invisibly).
Each row corresponds to a terminal leaf in the model. It contains information
The \code{xgb.ggplot.deepness} silently returns either a list of two ggplot graphs when \code{which="2x1"} about depth, cover, and weight (used in calculating predictions).
or a single ggplot graph for the other \code{which} options. If \code{plot = TRUE}, a plot is also shown.
\item \code{xgb.ggplot.deepness()}: When \code{which = "2x1"}, a list of two "ggplot" objects,
and a single "ggplot" object otherwise.
}
} }
\description{ \description{
Visualizes distributions related to depth of tree leafs. Visualizes distributions related to the depth of tree leaves.
\code{xgb.plot.deepness} uses base R graphics, while \code{xgb.ggplot.deepness} uses the ggplot backend. \itemize{
\item \code{xgb.plot.deepness()} uses base R graphics, while
\item \code{xgb.ggplot.deepness()} uses "ggplot2".
}
} }
\details{ \details{
When \code{which = "2x1"}, two distributions with respect to the leaf depth When \code{which = "2x1"}, two distributions with respect to the leaf depth
are plotted on top of each other: are plotted on top of each other:
\itemize{ \enumerate{
\item the distribution of the number of leafs in a tree model at a certain depth; \item The distribution of the number of leaves in a tree model at a certain depth.
\item the distribution of average weighted number of observations ("cover") \item The distribution of the average weighted number of observations ("cover")
ending up in leafs at certain depth. ending up in leaves at a certain depth.
} }
Those could be helpful in determining sensible ranges of the \code{max_depth} Those could be helpful in determining sensible ranges of the \code{max_depth}
and \code{min_child_weight} parameters. and \code{min_child_weight} parameters.
When \code{which="max.depth"} or \code{which="med.depth"}, plots of either maximum or median depth When \code{which = "max.depth"} or \code{which = "med.depth"}, plots of either maximum or
per tree with respect to tree number are created. And \code{which="med.weight"} allows to see how median depth per tree with respect to the tree number are created.
Finally, \code{which = "med.weight"} allows to see how
a tree's median absolute leaf weight changes through the iterations. a tree's median absolute leaf weight changes through the iterations.
This function was inspired by the blog post These functions have been inspired by the blog post
\url{https://github.com/aysent/random-forest-leaf-visualization}. \url{https://github.com/aysent/random-forest-leaf-visualization}.
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
## Keep the number of threads to 2 for examples ## Keep the number of threads to 2 for examples
nthread <- 2 nthread <- 2
data.table::setDTthreads(nthread) data.table::setDTthreads(nthread)
## Change max_depth to a higher number to get a more significant result ## Change max_depth to a higher number to get a more significant result
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 6, bst <- xgboost(
eta = 0.1, nthread = nthread, nrounds = 50, objective = "binary:logistic", data = agaricus.train$data,
subsample = 0.5, min_child_weight = 2) label = agaricus.train$label,
max_depth = 6,
nthread = nthread,
nrounds = 50,
objective = "binary:logistic",
subsample = 0.5,
min_child_weight = 2
)
xgb.plot.deepness(bst) xgb.plot.deepness(bst)
xgb.ggplot.deepness(bst) xgb.ggplot.deepness(bst)
xgb.plot.deepness(bst, which='max.depth', pch=16, col=rgb(0,0,1,0.3), cex=2) xgb.plot.deepness(
bst, which = "max.depth", pch = 16, col = rgb(0, 0, 1, 0.3), cex = 2
)
xgb.plot.deepness(bst, which='med.weight', pch=16, col=rgb(0,0,1,0.3), cex=2) xgb.plot.deepness(
bst, which = "med.weight", pch = 16, col = rgb(0, 0, 1, 0.3), cex = 2
)
} }
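The processed table can also be used without any plotting. A small sketch with plot = FALSE, assuming bst from the example above (per the Value section, each row describes one terminal leaf):

leaves <- xgb.plot.deepness(bst, plot = FALSE)
head(leaves)  # one row per leaf, with its depth, cover, and weight information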
\seealso{ \seealso{
\code{\link{xgb.train}}, \code{\link{xgb.model.dt.tree}}. \code{\link[=xgb.train]{xgb.train()}} and \code{\link[=xgb.model.dt.tree]{xgb.model.dt.tree()}}.
} }

man/xgb.ggplot.importance.Rd View File

@@ -3,7 +3,7 @@
\name{xgb.ggplot.importance} \name{xgb.ggplot.importance}
\alias{xgb.ggplot.importance} \alias{xgb.ggplot.importance}
\alias{xgb.plot.importance} \alias{xgb.plot.importance}
\title{Plot feature importance as a bar graph} \title{Plot feature importance}
\usage{ \usage{
xgb.ggplot.importance( xgb.ggplot.importance(
importance_matrix = NULL, importance_matrix = NULL,
@@ -26,74 +26,90 @@ xgb.plot.importance(
) )
} }
\arguments{ \arguments{
\item{importance_matrix}{a \code{data.table} returned by \code{\link{xgb.importance}}.} \item{importance_matrix}{A \code{data.table} as returned by \code{\link[=xgb.importance]{xgb.importance()}}.}
\item{top_n}{maximal number of top features to include into the plot.} \item{top_n}{Maximal number of top features to include into the plot.}
\item{measure}{the name of importance measure to plot. \item{measure}{The name of importance measure to plot.
When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.} When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.}
\item{rel_to_first}{whether importance values should be represented as relative to the highest ranked feature. \item{rel_to_first}{Whether importance values should be represented as relative to
See Details.} the highest ranked feature, see Details.}
\item{n_clusters}{(ggplot only) a \code{numeric} vector containing the min and the max range \item{n_clusters}{A numeric vector containing the min and the max range
of the possible number of clusters of bars.} of the possible number of clusters of bars.}
\item{...}{other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).} \item{...}{Other parameters passed to \code{\link[graphics:barplot]{graphics::barplot()}}
(except \code{horiz}, \code{border}, \code{cex.names}, \code{names.arg}, and \code{las}).
Only used in \code{xgb.plot.importance()}.}
\item{left_margin}{(base R barplot) allows to adjust the left margin size to fit feature names. \item{left_margin}{Adjust the left margin size to fit feature names.
When it is NULL, the existing \code{par('mar')} is used.} When \code{NULL}, the existing \code{par("mar")} is used.}
\item{cex}{(base R barplot) passed as \code{cex.names} parameter to \code{barplot}.} \item{cex}{Passed as \code{cex.names} parameter to \code{\link[graphics:barplot]{graphics::barplot()}}.}
\item{plot}{(base R barplot) whether a barplot should be produced. \item{plot}{Should the barplot be shown? Default is \code{TRUE}.}
If FALSE, only a data.table is returned.}
} }
\value{ \value{
The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE}) The return value depends on the function:
and silently returns a processed data.table with \code{n_top} features sorted by importance. \itemize{
\item \code{xgb.plot.importance()}: Invisibly, a "data.table" with \code{n_top} features sorted
The \code{xgb.ggplot.importance} function returns a ggplot graph which could be customized afterwards. by importance. If \code{plot = TRUE}, the values are also plotted as barplot.
E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result. \item \code{xgb.ggplot.importance()}: A customizable "ggplot" object.
E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result. E.g., to change the title, add \code{+ ggtitle("A GRAPH NAME")}.
}
} }
\description{ \description{
Represents previously calculated feature importance as a bar graph. Represents previously calculated feature importance as a bar graph.
\code{xgb.plot.importance} uses base R graphics, while \code{xgb.ggplot.importance} uses the ggplot backend. \itemize{
\item \code{xgb.plot.importance()} uses base R graphics, while
\item \code{xgb.ggplot.importance()} uses "ggplot".
}
} }
\details{ \details{
The graph represents each feature as a horizontal bar of length proportional to the importance of a feature. The graph represents each feature as a horizontal bar of length proportional to the importance of a feature.
Features are shown ranked in a decreasing importance order. Features are sorted by decreasing importance.
It works for importances from both \code{gblinear} and \code{gbtree} models. It works for both "gblinear" and "gbtree" models.
When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}. When \code{rel_to_first = FALSE}, the values would be plotted as in \code{importance_matrix}.
For gbtree model, that would mean being normalized to the total of 1 For a "gbtree" model, that would mean being normalized to the total of 1
("what is feature's importance contribution relative to the whole model?"). ("what is feature's importance contribution relative to the whole model?").
For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients. For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients.
Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
"what is feature's importance contribution relative to the most important feature?" "what is feature's importance contribution relative to the most important feature?"
The ggplot-backend method also performs 1-D clustering of the importance values, The "ggplot" backend performs 1-D clustering of the importance values,
with bar colors corresponding to different clusters that have somewhat similar importance values. with bar colors corresponding to different clusters having similar importance values.
} }
\examples{ \examples{
data(agaricus.train) data(agaricus.train)
## Keep the number of threads to 2 for examples ## Keep the number of threads to 2 for examples
nthread <- 2 nthread <- 2
data.table::setDTthreads(nthread) data.table::setDTthreads(nthread)
bst <- xgboost( bst <- xgboost(
data = agaricus.train$data, label = agaricus.train$label, max_depth = 3, data = agaricus.train$data,
eta = 1, nthread = nthread, nrounds = 2, objective = "binary:logistic" label = agaricus.train$label,
max_depth = 3,
eta = 1,
nthread = nthread,
nrounds = 2,
objective = "binary:logistic"
) )
importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst) importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst)
xgb.plot.importance(
importance_matrix, rel_to_first = TRUE, xlab = "Relative importance"
)
xgb.plot.importance(importance_matrix, rel_to_first = TRUE, xlab = "Relative importance") gg <- xgb.ggplot.importance(
importance_matrix, measure = "Frequency", rel_to_first = TRUE
(gg <- xgb.ggplot.importance(importance_matrix, measure = "Frequency", rel_to_first = TRUE)) )
gg
gg + ggplot2::ylab("Frequency") gg + ggplot2::ylab("Frequency")
} }
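Since the ggplot variant returns a regular "ggplot" object, standard ggplot2 layers apply. A minimal sketch (the theme and title are chosen for illustration only):

gg2 <- xgb.ggplot.importance(importance_matrix, top_n = 5)
gg2 + ggplot2::theme_minimal() + ggplot2::ggtitle("Top 5 features by gain")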
\seealso{ \seealso{
\code{\link[graphics]{barplot}}. \code{\link[graphics:barplot]{graphics::barplot()}}
} }

man/xgb.plot.multi.trees.Rd View File

@@ -2,7 +2,7 @@
% Please edit documentation in R/xgb.plot.multi.trees.R % Please edit documentation in R/xgb.plot.multi.trees.R
\name{xgb.plot.multi.trees} \name{xgb.plot.multi.trees}
\alias{xgb.plot.multi.trees} \alias{xgb.plot.multi.trees}
\title{Project all trees on one tree and plot it} \title{Project all trees on one tree}
\usage{ \usage{
xgb.plot.multi.trees( xgb.plot.multi.trees(
model, model,
@@ -15,29 +15,31 @@ xgb.plot.multi.trees(
) )
} }
\arguments{ \arguments{
\item{model}{produced by the \code{xgb.train} function.} \item{model}{Object of class \code{xgb.Booster}.}
\item{feature_names}{names of each feature as a \code{character} vector.} \item{feature_names}{Character vector used to overwrite the feature names
of the model. The default (\code{NULL}) uses the original feature names.}
\item{features_keep}{number of features to keep in each position of the multi trees.} \item{features_keep}{Number of features to keep in each position of the multi trees,
by default 5.}
\item{plot_width}{width in pixels of the graph to produce} \item{plot_width, plot_height}{Width and height of the graph in pixels.
The values are passed to \code{\link[DiagrammeR:render_graph]{DiagrammeR::render_graph()}}.}
\item{plot_height}{height in pixels of the graph to produce} \item{render}{Should the graph be rendered or not? The default is \code{TRUE}.}
\item{render}{a logical flag for whether the graph should be rendered (see Value).} \item{...}{currently not used.}
\item{...}{currently not used}
} }
\value{ \value{
When \code{render = TRUE}: The value depends on the \code{render} parameter:
returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}. \itemize{
Similar to ggplot objects, it needs to be printed to see it when not running from command line. \item If \code{render = TRUE} (default): Rendered graph object which is an htmlwidget of
class \code{grViz}. Similar to "ggplot" objects, it needs to be printed when not
When \code{render = FALSE}: running from the command line.
silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}. \item If \code{render = FALSE}: Graph object which is of DiagrammeR's class \code{dgr_graph}.
This could be useful if one wants to modify some of the graph attributes This could be useful if one wants to modify some of the graph attributes
before rendering the graph with \code{\link[DiagrammeR]{render_graph}}. before rendering the graph with \code{\link[DiagrammeR:render_graph]{DiagrammeR::render_graph()}}.
}
} }
\description{ \description{
Visualization of the ensemble of trees as a single collective unit. Visualization of the ensemble of trees as a single collective unit.
@@ -62,15 +64,22 @@ This function is inspired by this blog post:
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
## Keep the number of threads to 2 for examples ## Keep the number of threads to 2 for examples
nthread <- 2 nthread <- 2
data.table::setDTthreads(nthread) data.table::setDTthreads(nthread)
bst <- xgboost( bst <- xgboost(
data = agaricus.train$data, label = agaricus.train$label, max_depth = 15, data = agaricus.train$data,
eta = 1, nthread = nthread, nrounds = 30, objective = "binary:logistic", label = agaricus.train$label,
min_child_weight = 50, verbose = 0 max_depth = 15,
eta = 1,
nthread = nthread,
nrounds = 30,
objective = "binary:logistic",
min_child_weight = 50,
verbose = 0
) )
p <- xgb.plot.multi.trees(model = bst, features_keep = 3) p <- xgb.plot.multi.trees(model = bst, features_keep = 3)
@@ -78,10 +87,13 @@ print(p)
\dontrun{ \dontrun{
# Below is an example of how to save this plot to a file. # Below is an example of how to save this plot to a file.
# Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed. # Note that for export_graph() to work, the {DiagrammeRsvg} and {rsvg} packages
# must also be installed.
library(DiagrammeR) library(DiagrammeR)
gr <- xgb.plot.multi.trees(model = bst, features_keep = 3, render = FALSE) gr <- xgb.plot.multi.trees(model = bst, features_keep = 3, render = FALSE)
export_graph(gr, 'tree.pdf', width=1500, height=600) export_graph(gr, "tree.pdf", width = 1500, height = 600)
} }
} }
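With render = FALSE, the returned dgr_graph can also be rendered manually, e.g. to control the widget size. A sketch building on bst from the examples above (the width and height values are arbitrary):

gr <- xgb.plot.multi.trees(model = bst, features_keep = 3, render = FALSE)
DiagrammeR::render_graph(gr, width = 1200, height = 800)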

man/xgb.plot.shap.Rd View File

@@ -2,7 +2,7 @@
% Please edit documentation in R/xgb.plot.shap.R % Please edit documentation in R/xgb.plot.shap.R
\name{xgb.plot.shap} \name{xgb.plot.shap}
\alias{xgb.plot.shap} \alias{xgb.plot.shap}
\title{SHAP contribution dependency plots} \title{SHAP dependence plots}
\usage{ \usage{
xgb.plot.shap( xgb.plot.shap(
data, data,
@@ -33,87 +33,93 @@ xgb.plot.shap(
) )
} }
\arguments{ \arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.} \item{data}{The data to explain as a \code{matrix} or \code{dgCMatrix}.}
\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above \item{shap_contrib}{Matrix of SHAP contributions of \code{data}.
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.} The default (\code{NULL}) computes it from \code{model} and \code{data}.}
\item{features}{a vector of either column indices or of feature names to plot. When it is NULL, \item{features}{Vector of column indices or feature names to plot.
feature importance is calculated, and \code{top_n} high ranked features are taken.} When \code{NULL} (default), the \code{top_n} most important features are selected
by \code{\link[=xgb.importance]{xgb.importance()}}.}
\item{top_n}{when \code{features} is NULL, top_n \verb{[1, 100]} most important features in a model are taken.} \item{top_n}{How many of the most important features (<= 100) should be selected?
By default 1 for SHAP dependence and 10 for SHAP summary.
Only used when \code{features = NULL}.}
\item{model}{an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib} \item{model}{An \code{xgb.Booster} model. Only required when \code{shap_contrib = NULL} or
or \code{features} is missing.} \code{features = NULL}.}
\item{trees}{passed to \code{\link{xgb.importance}} when \code{features = NULL}.} \item{trees}{Passed to \code{\link[=xgb.importance]{xgb.importance()}} when \code{features = NULL}.}
\item{target_class}{is only relevant for multiclass models. When it is set to a 0-based class index, \item{target_class}{Only relevant for multiclass models. The default (\code{NULL})
only SHAP contributions for that specific class are used. averages the SHAP values over all classes. Pass a (0-based) class index
If it is not set, SHAP importances are averaged over all classes.} to show only SHAP values of that class.}
\item{approxcontrib}{passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.} \item{approxcontrib}{Passed to \code{predict()} when \code{shap_contrib = NULL}.}
\item{subsample}{a random fraction of data points to use for plotting. When it is NULL, \item{subsample}{Fraction of data points randomly picked for plotting.
it is set so that up to 100K data points are used.} The default (\code{NULL}) will use up to 100k data points.}
\item{n_col}{a number of columns in a grid of plots.} \item{n_col}{Number of columns in a grid of plots.}
\item{col}{color of the scatterplot markers.} \item{col}{Color of the scatterplot markers.}
\item{pch}{scatterplot marker.} \item{pch}{Scatterplot marker.}
\item{discrete_n_uniq}{a maximal number of unique values in a feature to consider it as discrete.} \item{discrete_n_uniq}{Maximal number of unique feature values to consider the
feature as discrete.}
\item{discrete_jitter}{an \code{amount} parameter of jitter added to discrete features' positions.} \item{discrete_jitter}{Jitter amount added to the values of discrete features.}
\item{ylab}{a y-axis label in 1D plots.} \item{ylab}{The y-axis label in 1D plots.}
\item{plot_NA}{whether the contributions of cases with missing values should also be plotted.} \item{plot_NA}{Should contributions of cases with missing values be plotted?
Default is \code{TRUE}.}
\item{col_NA}{a color of marker for missing value contributions.} \item{col_NA}{Color of marker for missing value contributions.}
\item{pch_NA}{a marker type for NA values.} \item{pch_NA}{Marker type for \code{NA} values.}
\item{pos_NA}{a relative position of the x-location where NA values are shown: \item{pos_NA}{Relative position of the x-location where \code{NA} values are shown:
\code{min(x) + (max(x) - min(x)) * pos_NA}.} \code{min(x) + (max(x) - min(x)) * pos_NA}.}
\item{plot_loess}{whether to plot loess-smoothed curves. The smoothing is only done for features with \item{plot_loess}{Should loess-smoothed curves be plotted? (Default is \code{TRUE}).
more than 5 distinct values.} The smoothing is only done for features with more than 5 distinct values.}
\item{col_loess}{a color to use for the loess curves.} \item{col_loess}{Color of loess curves.}
\item{span_loess}{the \code{span} parameter in \code{\link[stats]{loess}}'s call.} \item{span_loess}{The \code{span} parameter of \code{\link[stats:loess]{stats::loess()}}.}
\item{which}{whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.} \item{which}{Whether to do univariate or bivariate plotting. Currently, only "1d" is implemented.}
\item{plot}{whether a plot should be drawn. If FALSE, only a list of matrices is returned.} \item{plot}{Should the plot be drawn? (Default is \code{TRUE}).
If \code{FALSE}, only a list of matrices is returned.}
\item{...}{other parameters passed to \code{plot}.} \item{...}{Other parameters passed to \code{\link[graphics:plot.default]{graphics::plot()}}.}
} }
\value{ \value{
In addition to producing plots (when \code{plot = TRUE}), it silently returns a list of two matrices: In addition to producing plots (when \code{plot = TRUE}), it silently returns a list of two matrices:
\itemize{ \itemize{
\item \code{data} the values of selected features; \item \code{data}: Feature value matrix.
\item \code{shap_contrib} the contributions of selected features. \item \code{shap_contrib}: Corresponding SHAP value matrix.
} }
} }
\description{ \description{
Visualizing the SHAP feature contribution to prediction dependencies on feature value. Visualizes SHAP values against feature values to gain an impression of feature effects.
} }
\details{ \details{
These scatterplots represent how SHAP feature contributions depend on feature values. These scatterplots represent how SHAP feature contributions depend on feature values.
The similarity to partial dependency plots is that they also give an idea for how feature values The similarity to partial dependence plots is that they also give an idea for how feature values
affect predictions. However, in partial dependency plots, we usually see marginal dependencies affect predictions. However, in partial dependence plots, we see marginal dependencies
of model prediction on feature value, while SHAP contribution dependency plots display the estimated of model prediction on feature value, while SHAP dependence plots display the estimated
contributions of a feature to model prediction for each individual case. contributions of a feature to the prediction for each individual case.
When \code{plot_loess = TRUE} is set, feature values are rounded to 3 significant digits and When \code{plot_loess = TRUE}, feature values are rounded to three significant digits and
weighted LOESS is computed and plotted, where weights are the numbers of data points weighted LOESS is computed and plotted, where the weights are the numbers of data points
at each rounded value. at each rounded value.
Note: SHAP contributions are shown on the scale of model margin. E.g., for a logistic binomial objective, Note: SHAP contributions are on the scale of the model margin.
the margin is prediction before a sigmoidal transform into probability-like values. E.g., for a logistic binomial objective, the margin is on log-odds scale.
Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP
contributions for all features + bias), depending on the objective used, transforming SHAP contributions for all features + bias), depending on the objective used, transforming SHAP
contributions for a feature from the marginal to the prediction space is not necessarily contributions for a feature from the marginal to the prediction space is not necessarily
@@ -121,44 +127,99 @@ a meaningful thing to do.
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
data(agaricus.test, package='xgboost') data(agaricus.test, package = "xgboost")
## Keep the number of threads to 1 for examples ## Keep the number of threads to 1 for examples
nthread <- 1 nthread <- 1
data.table::setDTthreads(nthread) data.table::setDTthreads(nthread)
nrounds <- 20 nrounds <- 20
bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = nrounds, bst <- xgboost(
eta = 0.1, max_depth = 3, subsample = .5, agaricus.train$data,
method = "hist", objective = "binary:logistic", nthread = nthread, verbose = 0) agaricus.train$label,
nrounds = nrounds,
eta = 0.1,
max_depth = 3,
subsample = 0.5,
objective = "binary:logistic",
nthread = nthread,
verbose = 0
)
xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none") xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none")
contr <- predict(bst, agaricus.test$data, predcontrib = TRUE) contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3) xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3)
xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12) # Summary plot
# multiclass example - plots for each class separately:
xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12)
# Multiclass example - plots for each class separately:
nclass <- 3 nclass <- 3
x <- as.matrix(iris[, -5]) x <- as.matrix(iris[, -5])
set.seed(123) set.seed(123)
is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values
mbst <- xgboost(data = x, label = as.numeric(iris$Species) - 1, nrounds = nrounds,
max_depth = 2, eta = 0.3, subsample = .5, nthread = nthread, mbst <- xgboost(
objective = "multi:softprob", num_class = nclass, verbose = 0) data = x,
label = as.numeric(iris$Species) - 1,
nrounds = nrounds,
max_depth = 2,
eta = 0.3,
subsample = 0.5,
nthread = nthread,
objective = "multi:softprob",
num_class = nclass,
verbose = 0
)
trees0 <- seq(from = 0, by = nclass, length.out = nrounds) trees0 <- seq(from = 0, by = nclass, length.out = nrounds)
col <- rgb(0, 0, 1, 0.5) col <- rgb(0, 0, 1, 0.5)
xgb.plot.shap(x, model = mbst, trees = trees0, target_class = 0, top_n = 4, xgb.plot.shap(
n_col = 2, col = col, pch = 16, pch_NA = 17) x,
xgb.plot.shap(x, model = mbst, trees = trees0 + 1, target_class = 1, top_n = 4, model = mbst,
n_col = 2, col = col, pch = 16, pch_NA = 17) trees = trees0,
xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4, target_class = 0,
n_col = 2, col = col, pch = 16, pch_NA = 17) top_n = 4,
xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4) # Summary plot n_col = 2,
col = col,
pch = 16,
pch_NA = 17
)
xgb.plot.shap(
x,
model = mbst,
trees = trees0 + 1,
target_class = 1,
top_n = 4,
n_col = 2,
col = col,
pch = 16,
pch_NA = 17
)
xgb.plot.shap(
x,
model = mbst,
trees = trees0 + 2,
target_class = 2,
top_n = 4,
n_col = 2,
col = col,
pch = 16,
pch_NA = 17
)
# Summary plot
xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4)
} }
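The additivity mentioned in the Details section can be checked directly: summing a row of SHAP contributions (including the trailing BIAS column) gives the margin prediction, and the inverse link maps it back to a probability. A sketch for the binary model bst from the examples above:

contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
margin <- rowSums(contr)  # SHAP values + bias = margin prediction
# for binary:logistic, sigmoid(margin) reproduces predict()'s probabilities
all.equal(as.numeric(1 / (1 + exp(-margin))),
          as.numeric(predict(bst, agaricus.test$data)))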
\references{ \references{
Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874} \enumerate{
\item Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions",
Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060} NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}
\item Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles",
\url{https://arxiv.org/abs/1706.06060}
}
} }

man/xgb.ggplot.shap.summary.Rd View File

@@ -3,7 +3,7 @@
\name{xgb.ggplot.shap.summary} \name{xgb.ggplot.shap.summary}
\alias{xgb.ggplot.shap.summary} \alias{xgb.ggplot.shap.summary}
\alias{xgb.plot.shap.summary} \alias{xgb.plot.shap.summary}
\title{SHAP contribution dependency summary plot} \title{SHAP summary plot}
\usage{ \usage{
xgb.ggplot.shap.summary( xgb.ggplot.shap.summary(
data, data,
@@ -30,49 +30,54 @@ xgb.plot.shap.summary(
) )
} }
\arguments{ \arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.} \item{data}{The data to explain as a \code{matrix} or \code{dgCMatrix}.}
\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above \item{shap_contrib}{Matrix of SHAP contributions of \code{data}.
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.} The default (\code{NULL}) computes it from \code{model} and \code{data}.}
\item{features}{a vector of either column indices or of feature names to plot. When it is NULL, \item{features}{Vector of column indices or feature names to plot.
feature importance is calculated, and \code{top_n} high ranked features are taken.} When \code{NULL} (default), the \code{top_n} most important features are selected
by \code{\link[=xgb.importance]{xgb.importance()}}.}
\item{top_n}{when \code{features} is NULL, top_n \verb{[1, 100]} most important features in a model are taken.} \item{top_n}{How many of the most important features (<= 100) should be selected?
By default 1 for SHAP dependence and 10 for SHAP summary.
Only used when \code{features = NULL}.}
\item{model}{an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib} \item{model}{An \code{xgb.Booster} model. Only required when \code{shap_contrib = NULL} or
or \code{features} is missing.} \code{features = NULL}.}
\item{trees}{passed to \code{\link{xgb.importance}} when \code{features = NULL}.} \item{trees}{Passed to \code{\link[=xgb.importance]{xgb.importance()}} when \code{features = NULL}.}
\item{target_class}{is only relevant for multiclass models. When it is set to a 0-based class index, \item{target_class}{Only relevant for multiclass models. The default (\code{NULL})
only SHAP contributions for that specific class are used. averages the SHAP values over all classes. Pass a (0-based) class index
If it is not set, SHAP importances are averaged over all classes.} to show only SHAP values of that class.}
\item{approxcontrib}{passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.} \item{approxcontrib}{Passed to \code{predict()} when \code{shap_contrib = NULL}.}
\item{subsample}{a random fraction of data points to use for plotting. When it is NULL, \item{subsample}{Fraction of data points randomly picked for plotting.
it is set so that up to 100K data points are used.} The default (\code{NULL}) will use up to 100k data points.}
} }
\value{ \value{
A \code{ggplot2} object. A \code{ggplot2} object.
} }
\description{ \description{
Compare SHAP contributions of different features. Visualizes SHAP contributions of different features.
} }
\details{ \details{
A point plot (each point representing one sample from \code{data}) is A point plot (each point representing one observation from \code{data}) is
produced for each feature, with the points plotted on the SHAP value axis. produced for each feature, with the points plotted on the SHAP value axis.
Each point (observation) is coloured based on its feature value. The plot Each point (observation) is coloured based on its feature value.
hence allows us to see which features have a negative / positive contribution
The plot allows to see which features have a negative / positive contribution
on the model prediction, and whether the contribution is different for larger on the model prediction, and whether the contribution is different for larger
or smaller values of the feature. We effectively try to replicate the or smaller values of the feature. Inspired by the summary plot of
\code{summary_plot} function from \url{https://github.com/shap/shap}. \url{https://github.com/shap/shap}.
} }
\examples{ \examples{
# See \code{\link{xgb.plot.shap}}. # See examples in xgb.plot.shap()
} }
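Since a ggplot2 object is returned, it can be adjusted afterwards. A small sketch, assuming the binary model bst from the xgb.plot.shap() examples (the axis label is illustrative):

p <- xgb.ggplot.shap.summary(agaricus.test$data, model = bst, top_n = 6)
p + ggplot2::labs(y = "SHAP value (log-odds)")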
\seealso{ \seealso{
\code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}}, \code{\link[=xgb.plot.shap]{xgb.plot.shap()}}, \code{\link[=xgb.ggplot.shap.summary]{xgb.ggplot.shap.summary()}},
\url{https://github.com/shap/shap} and the Python library \url{https://github.com/shap/shap}.
} }

man/xgb.plot.tree.Rd View File

@@ -2,7 +2,7 @@
% Please edit documentation in R/xgb.plot.tree.R % Please edit documentation in R/xgb.plot.tree.R
\name{xgb.plot.tree} \name{xgb.plot.tree}
\alias{xgb.plot.tree} \alias{xgb.plot.tree}
\title{Plot a boosted tree model} \title{Plot boosted trees}
\usage{ \usage{
xgb.plot.tree( xgb.plot.tree(
feature_names = NULL, feature_names = NULL,
@@ -16,76 +16,89 @@ xgb.plot.tree(
) )
} }
\arguments{ \arguments{
\item{feature_names}{names of each feature as a \code{character} vector.} \item{feature_names}{Character vector used to overwrite the feature names
of the model. The default (\code{NULL}) uses the original feature names.}
\item{model}{produced by the \code{xgb.train} function.} \item{model}{Object of class \code{xgb.Booster}.}
\item{trees}{an integer vector of tree indices that should be visualized. \item{trees}{An integer vector of tree indices that should be used.
If set to \code{NULL}, all trees of the model are included. The default (\code{NULL}) uses all trees.
IMPORTANT: the tree index in xgboost model is zero-based Useful, e.g., in multiclass classification to get only
(e.g., use \code{trees = 0:2} for the first 3 trees in a model).} the trees of one class. \emph{Important}: the tree index in XGBoost models
is zero-based (e.g., use \code{trees = 0:2} for the first three trees).}
\item{plot_width}{the width of the diagram in pixels.} \item{plot_width, plot_height}{Width and height of the graph in pixels.
The values are passed to \code{\link[DiagrammeR:render_graph]{DiagrammeR::render_graph()}}.}
\item{plot_height}{the height of the diagram in pixels.} \item{render}{Should the graph be rendered or not? The default is \code{TRUE}.}
\item{render}{a logical flag for whether the graph should be rendered (see Value).}
\item{show_node_id}{a logical flag for whether to show node IDs in the graph.} \item{show_node_id}{a logical flag for whether to show node IDs in the graph.}
\item{...}{currently not used.} \item{...}{currently not used.}
} }
\value{ \value{
When \code{render = TRUE}: The value depends on the \code{render} parameter:
returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}. \itemize{
Similar to ggplot objects, it needs to be printed to see it when not running from command line. \item If \code{render = TRUE} (default): Rendered graph object which is an htmlwidget of
class \code{grViz}. Similar to "ggplot" objects, it needs to be printed when not
When \code{render = FALSE}: running from the command line.
silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}. \item If \code{render = FALSE}: Graph object which is of DiagrammeR's class \code{dgr_graph}.
This could be useful if one wants to modify some of the graph attributes This could be useful if one wants to modify some of the graph attributes
before rendering the graph with \code{\link[DiagrammeR]{render_graph}}. before rendering the graph with \code{\link[DiagrammeR:render_graph]{DiagrammeR::render_graph()}}.
}
} }
\description{ \description{
Read a tree model text dump and plot the model. Read a tree model text dump and plot the model.
} }
\details{ \details{
The content of each node is organised that way: The content of each node is visualized like this:
\itemize{ \itemize{
\item Feature name. \item \emph{Feature name}.
\item \code{Cover}: The sum of second order gradient of training data classified to the leaf. \item \emph{Cover:} The sum of second order gradients of training data.
If it is square loss, this simply corresponds to the number of instances seen by a split For the squared loss, this simply corresponds to the number of instances in the node.
or collected by a leaf during training. The deeper in the tree, the lower the value.
The deeper in the tree a node is, the lower this metric will be. \item \emph{Gain} (for split nodes): Information gain metric of a split
\item \code{Gain} (for split nodes): the information gain metric of a split
(corresponds to the importance of the node in the model). (corresponds to the importance of the node in the model).
\item \code{Value} (for leafs): the margin value that the leaf may contribute to prediction. \item \emph{Value} (for leaves): Margin value that the leaf may contribute to the prediction.
} }
The tree root nodes also indicate the Tree index (0-based).
The tree root nodes also indicate the tree index (0-based).
The "Yes" branches are marked by the "< split_value" label. The "Yes" branches are marked by the "< split_value" label.
The branches that also used for missing values are marked as bold The branches also used for missing values are marked as bold
(as in "carrying extra capacity"). (as in "carrying extra capacity").
This function uses \href{https://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR. This function uses \href{https://www.graphviz.org/}{GraphViz} as DiagrammeR backend.
} }
\examples{ \examples{
data(agaricus.train, package='xgboost') data(agaricus.train, package = "xgboost")
bst <- xgboost(
data = agaricus.train$data,
label = agaricus.train$label,
max_depth = 3,
eta = 1,
nthread = 2,
nrounds = 2,
objective = "binary:logistic"
)
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
# plot all the trees # plot all the trees
xgb.plot.tree(model = bst) xgb.plot.tree(model = bst)
# plot only the first tree and display the node ID: # plot only the first tree and display the node ID:
xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE) xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE)
\dontrun{ \dontrun{
# Below is an example of how to save this plot to a file. # Below is an example of how to save this plot to a file.
# Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed. # Note that for export_graph() to work, the {DiagrammeRsvg}
# and {rsvg} packages must also be installed.
library(DiagrammeR) library(DiagrammeR)
gr <- xgb.plot.tree(model = bst, trees = 0:1, render = FALSE) gr <- xgb.plot.tree(model = bst, trees = 0:1, render = FALSE)
export_graph(gr, 'tree.pdf', width=1500, height=1900) export_graph(gr, "tree.pdf", width = 1500, height = 1900)
export_graph(gr, 'tree.png', width=1500, height=1900) export_graph(gr, "tree.png", width = 1500, height = 1900)
} }
} }
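To plot one specific tree instead of all of them, the node table from xgb.model.dt.tree() can help pick an index. A sketch (the Tree column is the zero-based tree index, so it can be passed to trees directly):

dt <- xgb.model.dt.tree(model = bst)
biggest <- dt[, .N, by = Tree][which.max(N), Tree]  # tree with the most nodes
xgb.plot.tree(model = bst, trees = biggest)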

man/xgb.shap.data.Rd View File

@@ -1,55 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.plot.shap.R
\name{xgb.shap.data}
\alias{xgb.shap.data}
\title{Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc.
Internal utility function.}
\usage{
xgb.shap.data(
data,
shap_contrib = NULL,
features = NULL,
top_n = 1,
model = NULL,
trees = NULL,
target_class = NULL,
approxcontrib = FALSE,
subsample = NULL,
max_observations = 1e+05
)
}
\arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.}
\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.}
\item{features}{a vector of either column indices or of feature names to plot. When it is NULL,
feature importance is calculated, and \code{top_n} high ranked features are taken.}
\item{top_n}{when \code{features} is NULL, top_n \verb{[1, 100]} most important features in a model are taken.}
\item{model}{an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib}
or \code{features} is missing.}
\item{trees}{passed to \code{\link{xgb.importance}} when \code{features = NULL}.}
\item{target_class}{is only relevant for multiclass models. When it is set to a 0-based class index,
only SHAP contributions for that specific class are used.
If it is not set, SHAP importances are averaged over all classes.}
\item{approxcontrib}{passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.}
\item{subsample}{a random fraction of data points to use for plotting. When it is NULL,
it is set so that up to 100K data points are used.}
}
\value{
A list containing: 'data', a matrix containing sample observations
and their feature values; 'shap_contrib', a matrix containing the SHAP contribution
values for these observations.
}
\description{
Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc.
Internal utility function.
}
\keyword{internal}