#' Show importance of features in a model
#'
#' Read an xgboost model text dump.
#' Can be a tree or a linear model (text dumps of linear models are only supported in the development version of \code{xgboost} for now).
#'
#' @importFrom data.table data.table
#' @importFrom data.table setnames
#' @importFrom data.table :=
#' @importFrom magrittr %>%
#' @importFrom Matrix colSums
#' @importFrom Matrix cBind
#' @importFrom Matrix sparseVector
#'
#' @param feature_names names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If the model dump already contains feature names, this argument should be \code{NULL}.
#' @param model the model generated by the \code{xgb.train} function. Providing it directly avoids the creation of a dump file.
#' @param data the dataset used for the training step. Will be used with the \code{label} parameter for co-occurrence computation. More information in the \code{Details} section. This parameter is optional.
#' @param label the label vector used for the training step. Will be used with the \code{data} parameter for co-occurrence computation. More information in the \code{Details} section. This parameter is optional.
#' @param target a function which returns \code{TRUE} or \code{1} when an observation should be counted as a co-occurrence and \code{FALSE} or \code{0} otherwise. A default function is provided for computing co-occurrences in a binary classification. The \code{target} function should have only one parameter. This parameter will be used to provide each important feature vector after the split condition has been applied, therefore these vectors will contain only 0s and 1s, whatever the original values were. More information in the \code{Details} section. This parameter is optional.
#'
#' @return A \code{data.table} of the features used in the model, with their average gain (and, for boosted tree models, their weight).
#'
#' @details
#' This is the function to use to understand the trained model (and, through your model, your data).
#'
#' Results are returned for both linear and tree models.
#'
#' A \code{data.table} is returned by the function, with the following columns:
#' \itemize{
#' \item \code{Feature} name of the features, as provided in \code{feature_names} or already present in the model dump;
#' \item \code{Gain} contribution of each feature to the model. For boosted tree models, the gain of each feature in each tree is taken into account, then averaged per feature to give a vision of the entire model. A higher percentage means a more important feature for predicting the \code{label} used for the training;
#' \item \code{Cover} metric of the number of observations related to this feature (only available for tree models);
#' \item \code{Weight} percentage representing the relative number of times a feature has been used in the trees. \code{Gain} should be preferred when searching for the most important feature. For boosted linear models, this column has no meaning.
#' }
#'
#' Co-occurrence count
#' -------------------
#'
#' The gain gives an indication of how important a feature is in making a branch of a decision tree purer. However, with this information alone, you cannot know whether this feature has to be present or absent to get a specific classification. In the example code, you may wonder whether odor=none should be \code{TRUE} to not eat a mushroom.
#'
#' Co-occurrence computation is here to help in understanding the relation between a predictor and a specific class. It counts how many observations are returned as \code{TRUE} by the \code{target} function (see parameters). When you execute the example below, there are only 92 cases over the 3140 observations of the train dataset where a mushroom has no odor and can be eaten safely.
#'
#' If you need to remember only one thing: unless you want to leave us early, don't eat a mushroom which has no odor :-)
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#'
#' # The dataset is a list with two items, a sparse matrix and labels
#' # (labels = outcome column which will be learned).
#' # Each column of the sparse Matrix is a feature in one hot encoding format.
#' train <- agaricus.train
#'
#' bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
#'                eta = 1, nthread = 2, nround = 2, objective = "binary:logistic")
#'
#' # train$data@@Dimnames[[2]] represents the column names of the sparse matrix.
#' xgb.importance(train$data@@Dimnames[[2]], model = bst)
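#'
#' # The result is a data.table. As a small illustration (the variable name below is
#' # ours, not part of the original example), it can be stored and inspected:
#' importance_matrix <- xgb.importance(train$data@@Dimnames[[2]], model = bst)
#' head(importance_matrix)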
#'
#' # Same thing, with co-occurrence computation this time
#' xgb.importance(train$data@@Dimnames[[2]], model = bst, data = train$data, label = train$label)
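#'
#' # As an illustration of the target parameter (this call is ours, not part of the
#' # original example), the default co-occurrence criterion can be passed explicitly:
#' # it flags observations where the split condition holds (x == 1) and the label is 1.
#' xgb.importance(train$data@@Dimnames[[2]], model = bst, data = train$data,
#'                label = train$label, target = function(x) ( (x + train$label) == 2 ))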
#'
#' @export
xgb.importance <- function(feature_names = NULL, model = NULL, data = NULL, label = NULL, target = function(x) ( (x + label) == 2)){
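  # Note on the default `target` (added comment): an observation counts as a co-occurrence
  # when both the split condition holds (x == 1) and label == 1, since (x + label) == 2
  # only in that case.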
  if (!class(feature_names) %in% c("character", "NULL")) {
    stop("feature_names: Has to be a vector of character strings, or NULL if the model already contains feature names. Look at this function documentation to see where to get feature names.")
  }

  if (class(model) != "xgb.Booster") {
    stop("model: Has to be an object of class xgb.Booster, as generated by the xgb.train function.")
  }

  if ((is.null(data) & !is.null(label)) | (!is.null(data) & is.null(label))) {
    stop("data/label: Provide both arguments if you want co-occurrence computation, or neither of them if you are not interested, but not only one of them.")
  }

  # Store a mostly-zero label vector as a sparse vector to save memory.
  if (class(label) == "numeric") {
    if (sum(label == 0) / length(label) > 0.5) label <- as(label, "sparseVector")
  }

  # Aggregate per-feature statistics (Gain, Cover, Frequency) from a tree model dump.
  treeDump <- function(feature_names, text, keepDetail){
    if (keepDetail) groupBy <- c("Feature", "Split", "MissingNo") else groupBy <- "Feature"
    xgb.model.dt.tree(feature_names = feature_names, text = text)[, "MissingNo" := Missing == No][
      Feature != "Leaf", .(Gain = sum(Quality), Cover = sum(Cover), Frequency = .N), by = groupBy, with = T][
      , `:=`(Gain = Gain / sum(Gain), Cover = Cover / sum(Cover), Frequency = Frequency / sum(Frequency))][order(Gain, decreasing = T)]
  }

  # Extract per-feature weights from a linear model dump.
  linearDump <- function(feature_names, text){
    which(text == "weight:") %>% {a = . + 1; text[a:length(text)]} %>% as.numeric %>% data.table(Feature = feature_names, Weight = .)
  }
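
  # Illustration (added comment, format inferred from the parsing code above): a linear
  # booster dump looks like c("booster[0]", "bias:", "<bias>", "weight:", "<w1>", "<w2>", ...),
  # so the numeric weights are the lines following "weight:".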

  # Text dump of the model, with per-split statistics.
  model.text.dump <- xgb.dump(model = model, with.stats = T)

  # The second line of a linear booster dump is "bias:"; use it to tell linear from tree models.
  if (model.text.dump[2] == "bias:"){
    result <- model.text.dump %>% linearDump(feature_names, .)
    if (!is.null(data) | !is.null(label)) warning("data/label: these parameters should only be provided with decision tree based models.")
  } else {
    result <- treeDump(feature_names, text = model.text.dump, keepDetail = !is.null(data))

    # Co-occurrence computation
    if (!is.null(data) & !is.null(label) & nrow(result) > 0) {
      # Columns where missing values follow the "No" branch: mark non-zero entries.
      a <- data[, result[MissingNo == T, Feature], drop = FALSE] != 0
      # Bind the two matrices and reorder the columns to match the result table.
      c <- data[, result[MissingNo == F, Feature], drop = FALSE] %>% cBind(a, .) %>% .[, result[, Feature]]
      rm(a)
      # Apply the split condition.
      d <- data[, result[, Feature], drop = FALSE] < as.numeric(result[, Split])
      # Count, for each feature, the observations flagged by the target function.
      vec <- apply(c & d, 2, . %>% target %>% sum)

      result <- result[, "RealCover" := as.numeric(vec), with = F][, "RealCover %" := RealCover / sum(label)][, MissingNo := NULL]
    }
  }

  result
}

# Avoid error messages during CRAN check.
# The reason is that these variables are never declared.
# They are mostly column names inferred by data.table...
globalVariables(c(".", "Feature", "Split", "No", "Missing", "MissingNo", "RealCover"))