Merge pull request #125 from pommedeterresautee/master
Take gain into account for feature importance
This commit is contained in:
commit
39bb719063
@ -23,4 +23,5 @@ Imports:
|
|||||||
Matrix (>= 1.1-0),
|
Matrix (>= 1.1-0),
|
||||||
methods,
|
methods,
|
||||||
data.table (>= 1.9),
|
data.table (>= 1.9),
|
||||||
magrittr (>= 1.5)
|
magrittr (>= 1.5),
|
||||||
|
stringr
|
||||||
@ -19,3 +19,4 @@ importClassesFrom(Matrix,dgeMatrix)
|
|||||||
importFrom(data.table,":=")
|
importFrom(data.table,":=")
|
||||||
importFrom(data.table,data.table)
|
importFrom(data.table,data.table)
|
||||||
importFrom(magrittr,"%>%")
|
importFrom(magrittr,"%>%")
|
||||||
|
importFrom(stringr,str_extract)
|
||||||
|
|||||||
@ -1,34 +1,54 @@
|
|||||||
#' Show importance of features in a model
|
#' Show importance of features in a model
|
||||||
#'
|
#'
|
||||||
#' Read a xgboost model in text file format.
|
#' Read a xgboost model text dump.
|
||||||
#' Can be tree or linear model (text dump of linear model are only supported in dev version of Xgboost for now).
|
#' Can be tree or linear model (text dumps of linear models are only supported in the dev version of \code{Xgboost} for now).
|
||||||
|
#' Return a data.table of the features used in the model with their average gain (and their weight for boosted tree model) in the model.
|
||||||
#'
|
#'
|
||||||
#' Return a data.table of the features with their weight.
|
|
||||||
#' #'
|
|
||||||
#' @importFrom data.table data.table
|
#' @importFrom data.table data.table
|
||||||
#' @importFrom magrittr %>%
|
#' @importFrom magrittr %>%
|
||||||
#' @importFrom data.table :=
|
#' @importFrom data.table :=
|
||||||
#' @param feature_names names of each feature as a character vector. Can be extracted from a sparse matrix.
|
#' @importFrom stringr str_extract
|
||||||
#' @param filename_dump the name of the text file.
|
#' @param feature_names names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.
|
||||||
|
#' @param filename_dump the path to the text file storing the model. Model dump must include the gain per feature and per tree (\code{with.stats = T} in function \code{xgb.dump}).
|
||||||
|
#'
|
||||||
|
#' @details
|
||||||
|
#' This is the function to understand the model trained (and through your model, your data).
|
||||||
|
#'
|
||||||
|
#' Results are returned for both linear and tree models.
|
||||||
|
#'
|
||||||
|
#' \code{data.table} is returned by the function.
|
||||||
|
#' There are 3 columns :
|
||||||
|
#' \itemize{
|
||||||
|
#' \item \code{Features} name of the features as provided in \code{feature_names} or already present in the model dump.
|
||||||
|
#' \item \code{Gain} contribution of each feature to the model. For boosted tree model, each gain of each feature of each tree is taken into account, then averaged per feature to give a vision of the entire model. Highest percentage means most important feature regarding the \code{label} used for the training.
|
||||||
|
#' \item \code{Weight} percentage representing the relative number of times a feature has been taken into trees. \code{Gain} should be preferred to search the most important feature. For boosted linear model, this column has no meaning.
|
||||||
|
#' }
|
||||||
|
#'
|
||||||
#'
|
#'
|
||||||
#' @examples
|
#' @examples
|
||||||
#' data(agaricus.train, package='xgboost')
|
#' data(agaricus.train, package='xgboost')
|
||||||
#' data(agaricus.test, package='xgboost')
|
#' data(agaricus.test, package='xgboost')
|
||||||
#'
|
#'
|
||||||
#' #Both dataset are list with two items, a sparse matrix and labels (outcome column which will be learned).
|
#' #Both dataset are list with two items, a sparse matrix and labels (labels = outcome column which will be learned).
|
||||||
#' #Each column of the sparse Matrix is a feature in one hot encoding format.
|
#' #Each column of the sparse Matrix is a feature in one hot encoding format.
|
||||||
#' train <- agaricus.train
|
#' train <- agaricus.train
|
||||||
#' test <- agaricus.test
|
#' test <- agaricus.test
|
||||||
#'
|
#'
|
||||||
#' bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
|
#' bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
|
||||||
#' eta = 1, nround = 2,objective = "binary:logistic")
|
#' eta = 1, nround = 2,objective = "binary:logistic")
|
||||||
#' xgb.dump(bst, 'xgb.model.dump')
|
#' xgb.dump(bst, 'xgb.model.dump', with.stats = T)
|
||||||
#'
|
#'
|
||||||
#' #agaricus.test$data@@Dimnames[[2]] represents the column name of the sparse matrix.
|
#' #agaricus.test$data@@Dimnames[[2]] represents the column names of the sparse matrix.
|
||||||
#' xgb.importance(agaricus.test$data@@Dimnames[[2]], 'xgb.model.dump')
|
#' xgb.importance(agaricus.test$data@@Dimnames[[2]], 'xgb.model.dump')
|
||||||
#'
|
#'
|
||||||
#' @export
|
#' @export
|
||||||
xgb.importance <- function(feature_names, filename_dump){
|
xgb.importance <- function(feature_names = NULL, filename_dump = NULL){
|
||||||
|
if (!class(feature_names) %in% c("character", "NULL")) {
|
||||||
|
stop("feature_names: Has to be a vector of character or NULL if the model dump already contains feature name. Look at this function documentation to see where to get feature names.")
|
||||||
|
}
|
||||||
|
if (class(filename_dump) != "character" & file.exists(filename_dump)) {
|
||||||
|
stop("filename_dump: Has to be a path to the model dump file.")
|
||||||
|
}
|
||||||
text <- readLines(filename_dump)
|
text <- readLines(filename_dump)
|
||||||
if(text[2] == "bias:"){
|
if(text[2] == "bias:"){
|
||||||
result <- linearDump(feature_names, text)
|
result <- linearDump(feature_names, text)
|
||||||
@ -39,16 +59,20 @@ xgb.importance <- function(feature_names, filename_dump){
|
|||||||
}
|
}
|
||||||
|
|
||||||
treeDump <- function(feature_names, text){
|
treeDump <- function(feature_names, text){
|
||||||
result <- c()
|
featureVec <- c()
|
||||||
|
gainVec <- c()
|
||||||
for(line in text){
|
for(line in text){
|
||||||
p <- regexec("\\[f.*\\]", line) %>% regmatches(line, .)
|
p <- str_extract(line, "\\[f.*<")
|
||||||
if (length(p[[1]]) > 0) {
|
if (!is.na(p)) {
|
||||||
splits <- sub("\\[f", "", p[[1]]) %>% sub("\\]", "", .) %>% strsplit("<") %>% .[[1]] %>% as.numeric
|
featureVec <- substr(p, 3, nchar(p)-1) %>% c(featureVec)
|
||||||
result <- c(result, feature_names[splits[1]+ 1])
|
gainVec <- str_extract(line, "gain.*,") %>% substr(x = ., 6, nchar(.)-1) %>% as.numeric %>% c(gainVec)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if(!is.null(feature_names)) {
|
||||||
|
featureVec %<>% as.numeric %>% {c =.+1; feature_names[c]} #+1 because in R indexing start with 1 instead of 0.
|
||||||
|
}
|
||||||
#1. Reduce, 2. %, 3. reorder - bigger top, 4. remove temp col
|
#1. Reduce, 2. %, 3. reorder - bigger top, 4. remove temp col
|
||||||
data.table(Feature = result)[,.N, by = Feature][, Weight:= N /sum(N)][order(-rank(Weight))][,-2,with=F]
|
data.table(Feature = featureVec, Weight = gainVec)[,list(sum(Weight), .N), by = Feature][, Gain:= V1/sum(V1)][,Weight:= N/sum(N)][order(-rank(Gain))][,-c(2,3), with = F]
|
||||||
}
|
}
|
||||||
|
|
||||||
linearDump <- function(feature_names, text){
|
linearDump <- function(feature_names, text){
|
||||||
|
|||||||
@ -4,7 +4,7 @@
|
|||||||
\alias{xgb.dump}
|
\alias{xgb.dump}
|
||||||
\title{Save xgboost model to text file}
|
\title{Save xgboost model to text file}
|
||||||
\usage{
|
\usage{
|
||||||
xgb.dump(model, fname, fmap = "")
|
xgb.dump(model, fname, fmap = "", with.stats = FALSE)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{model}{the model object.}
|
\item{model}{the model object.}
|
||||||
@ -12,11 +12,16 @@ xgb.dump(model, fname, fmap = "")
|
|||||||
\item{fname}{the name of the binary file.}
|
\item{fname}{the name of the binary file.}
|
||||||
|
|
||||||
\item{fmap}{feature map file representing the type of feature.
|
\item{fmap}{feature map file representing the type of feature.
|
||||||
Detailed description could be found at
|
Detailed description could be found at
|
||||||
\url{https://github.com/tqchen/xgboost/wiki/Binary-Classification#dump-model}.
|
\url{https://github.com/tqchen/xgboost/wiki/Binary-Classification#dump-model}.
|
||||||
See demo/ for walkthrough example in R, and
|
See demo/ for walkthrough example in R, and
|
||||||
\url{https://github.com/tqchen/xgboost/blob/master/demo/data/featmap.txt}
|
\url{https://github.com/tqchen/xgboost/blob/master/demo/data/featmap.txt}
|
||||||
for example Format.}
|
for example Format.}
|
||||||
|
|
||||||
|
\item{with.stats}{whether dump statistics of splits
|
||||||
|
When this option is on, the model dump comes with two additional statistics:
|
||||||
|
gain is the approximate loss function gain we get in each split;
|
||||||
|
cover is the sum of second order gradient in each node.}
|
||||||
}
|
}
|
||||||
\description{
|
\description{
|
||||||
Save a xgboost model to text file. Could be parsed later.
|
Save a xgboost model to text file. Could be parsed later.
|
||||||
|
|||||||
@ -4,30 +4,45 @@
|
|||||||
\alias{xgb.importance}
|
\alias{xgb.importance}
|
||||||
\title{Show importance of features in a model}
|
\title{Show importance of features in a model}
|
||||||
\usage{
|
\usage{
|
||||||
xgb.importance(feature_names, filename_dump)
|
xgb.importance(feature_names = NULL, filename_dump = NULL)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{feature_names}{names of each feature as a character vector. Can be extracted from a sparse matrix.}
|
\item{feature_names}{names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
|
||||||
|
|
||||||
\item{filename_dump}{the name of the text file.}
|
\item{filename_dump}{the path to the text file storing the model. Model dump must include the gain per feature and per tree (\code{with.stats = T} in function \code{xgb.dump}).}
|
||||||
}
|
}
|
||||||
\description{
|
\description{
|
||||||
Read a xgboost model in text file format. Return a data.table of the features with their weight.
|
Read a xgboost model text dump.
|
||||||
|
Can be tree or linear model (text dumps of linear models are only supported in the dev version of \code{Xgboost} for now).
|
||||||
|
Return a data.table of the features used in the model with their average gain (and their weight for boosted tree model) in the model.
|
||||||
|
}
|
||||||
|
\details{
|
||||||
|
This is the function to understand the model trained (and through your model, your data).
|
||||||
|
|
||||||
|
Results are returned for both linear and tree models.
|
||||||
|
|
||||||
|
\code{data.table} is returned by the function.
|
||||||
|
There are 3 columns :
|
||||||
|
\itemize{
|
||||||
|
\item \code{Features} name of the features as provided in \code{feature_names} or already present in the model dump.
|
||||||
|
\item \code{Gain} contribution of each feature to the model. For boosted tree model, each gain of each feature of each tree is taken into account, then averaged per feature to give a vision of the entire model. Highest percentage means most important feature regarding the \code{label} used for the training.
|
||||||
|
\item \code{Weight} percentage representing the relative number of times a feature has been taken into trees. \code{Gain} should be preferred to search the most important feature. For boosted linear model, this column has no meaning.
|
||||||
|
}
|
||||||
}
|
}
|
||||||
\examples{
|
\examples{
|
||||||
data(agaricus.train, package='xgboost')
|
data(agaricus.train, package='xgboost')
|
||||||
data(agaricus.test, package='xgboost')
|
data(agaricus.test, package='xgboost')
|
||||||
|
|
||||||
#Both dataset are list with two items, a sparse matrix and labels (outcome column which will be learned).
|
#Both dataset are list with two items, a sparse matrix and labels (labels = outcome column which will be learned).
|
||||||
#Each column of the sparse Matrix is a feature in one hot encoding format.
|
#Each column of the sparse Matrix is a feature in one hot encoding format.
|
||||||
train <- agaricus.train
|
train <- agaricus.train
|
||||||
test <- agaricus.test
|
test <- agaricus.test
|
||||||
|
|
||||||
bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
|
bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
|
||||||
eta = 1, nround = 2,objective = "binary:logistic")
|
eta = 1, nround = 2,objective = "binary:logistic")
|
||||||
xgb.dump(bst, 'xgb.model.dump')
|
xgb.dump(bst, 'xgb.model.dump', with.stats = T)
|
||||||
|
|
||||||
#agaricus.test$data@Dimnames[[2]] represents the column name of the sparse matrix.
|
#agaricus.test$data@Dimnames[[2]] represents the column names of the sparse matrix.
|
||||||
xgb.importance(agaricus.test$data@Dimnames[[2]], 'xgb.model.dump')
|
xgb.importance(agaricus.test$data@Dimnames[[2]], 'xgb.model.dump')
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user