commit 5a49eb06ca

@@ -10,8 +10,8 @@
 #' @importFrom Matrix cBind
 #' @importFrom Matrix sparseVector
 #'
-#' @param feature_names names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.
-#' @param model generated by the \code{xgb.train} function. Avoid the creation of a dump file.
+#' @param feature_names names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.
+#' @param model generated by the \code{xgb.train} function.
 #' @param data the dataset used for the training step. Will be used with the \code{label} parameter for co-occurrence computation. More information in the \code{Details} section. This parameter is optional.
 #' @param label the label vector used for the training step. Will be used with the \code{data} parameter for co-occurrence computation. More information in the \code{Details} section. This parameter is optional.
 #' @param target a function which returns \code{TRUE} or \code{1} when an observation should be counted as a co-occurrence and \code{FALSE} or \code{0} otherwise. A default function is provided for computing co-occurrences in a binary classification. The \code{target} function should have only one parameter. This parameter will be used to provide each important feature vector after the split condition has been applied, therefore these vectors contain only 0 and 1, whatever the information was before. More information in the \code{Details} section. This parameter is optional.
@@ -19,8 +19,6 @@
 #' @return A \code{data.table} of the features used in the model, with their average gain (and their weight for boosted tree models).
 #'
 #' @details
-#' This is the function to understand the model trained (and through your model, your data).
-#'
 #' This function is for both linear and tree models.
 #'
 #' \code{data.table} is returned by the function.

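For review context, a minimal, illustrative sketch of how the arguments documented above fit together. The call signature comes from the \usage block of xgb.importance further down in this diff, and the agaricus data and training call mirror the package example; nothing in this sketch is part of the commit itself.

    library(xgboost)
    data(agaricus.train, package = 'xgboost')

    # Train the small demo model used throughout the package examples.
    bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
                   max.depth = 2, eta = 1, nthread = 2, nround = 2,
                   objective = "binary:logistic")

    # Importance table only: average gain (and weight for tree models).
    xgb.importance(feature_names = agaricus.train$data@Dimnames[[2]], model = bst)

    # Passing data and label additionally computes co-occurrence counts with the
    # default target, which counts observations where the feature (after the
    # split condition is applied) and the label are both 1.
    xgb.importance(feature_names = agaricus.train$data@Dimnames[[2]], model = bst,
                   data = agaricus.train$data, label = agaricus.train$label)
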
@@ -69,7 +69,7 @@ get.paths.to.leaf <- function(dt.tree) {
 #' @importFrom data.table setnames
 #' @importFrom data.table :=
 #' @importFrom magrittr %>%
-#' @param model dump generated by the \code{xgb.train} function. Avoid the creation of a dump file.
+#' @param model dump generated by the \code{xgb.train} function.
 #'
 #' @return Two graphs showing the distribution of the model deepness.
 #'
@@ -86,7 +86,7 @@ get.paths.to.leaf <- function(dt.tree) {
 #'
 #' \itemize{
 #'     \item Count: number of leaves per level of deepness;
-#'     \item Weighted cover: noramlized weighted cover per Leaf (weighted number of instances).
+#'     \item Weighted cover: normalized weighted cover per leaf (weighted number of instances).
 #' }
 #'
 #' This function is inspired by the blog post \url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}

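A matching usage sketch, illustrative only, reusing the bst model trained in the sketch above; the one-argument signature is confirmed by the \usage block of xgb.plot.deepness further down in this diff.

    # Draws the two deepness graphs described above for the trained ensemble:
    # leaf count per level, and weighted cover per leaf.
    xgb.plot.deepness(model = bst)
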
@@ -10,8 +10,8 @@
 #' @importFrom stringr str_detect
 #' @importFrom stringr str_extract
 #'
-#' @param model dump generated by the \code{xgb.train} function. Avoid the creation of a dump file.
-#' @param feature_names names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.
+#' @param model dump generated by the \code{xgb.train} function.
+#' @param feature_names names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.
 #' @param features.keep number of features to keep in each position of the multi trees.
 #' @param plot.width width in pixels of the graph to produce
 #' @param plot.height height in pixels of the graph to produce

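Again an illustrative sketch, not part of the commit, with the signature taken from the \usage block of xgb.plot.multi.trees below and bst reused from the first sketch.

    # Condense all trees of the ensemble into one representation, keeping the
    # five most frequent features per node position (the documented default).
    xgb.plot.multi.trees(model = bst,
                         feature_names = agaricus.train$data@Dimnames[[2]],
                         features.keep = 5)
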
@@ -1,12 +1,11 @@
 #' Plot a boosted tree model
 #'
-#' Read a tree model text dump.
-#' Plotting only works for boosted tree model (not linear model).
+#' Read a tree model text dump and plot the model.
 #'
 #' @importFrom data.table data.table
 #' @importFrom data.table :=
 #' @importFrom magrittr %>%
-#' @param feature_names names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.
+#' @param feature_names names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.
 #' @param model generated by the \code{xgb.train} function. Avoid the creation of a dump file.
 #' @param n_first_tree limit the plot to the first n trees. If \code{NULL}, all trees of the model are plotted. Performance can be low for huge models.
 #' @param plot.width the width of the diagram in pixels.
@@ -19,25 +18,20 @@
 #' The content of each node is organised that way:
 #'
 #' \itemize{
-#'   \item \code{feature} value ;
-#'   \item \code{cover}: the sum of second order gradient of training data classified to the leaf, if it is square loss, this simply corresponds to the number of instances in that branch. Deeper in the tree a node is, lower this metric will be ;
+#'   \item \code{feature} value;
+#'   \item \code{cover}: the sum of second order gradients of training data classified to the leaf; if it is square loss, this simply corresponds to the number of instances in that branch. The deeper in the tree a node is, the lower this metric will be;
 #'   \item \code{gain}: metric measuring the importance of the node in the model.
 #' }
 #'
 #' Each branch finishes with a leaf. For each leaf, only the \code{cover} is indicated.
-#' It uses \href{http://www.graphviz.org/}{GraphViz} library for that purpose.
+#' The function uses the \href{http://www.graphviz.org/}{GraphViz} library for that purpose.
 #'
 #' @examples
 #' data(agaricus.train, package='xgboost')
-#'
-#' #Both dataset are list with two items, a sparse matrix and labels
-#' #(labels = outcome column which will be learned).
-#' #Each column of the sparse Matrix is a feature in one hot encoding format.
-#'
 #' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 2,
 #'                eta = 1, nthread = 2, nround = 2, objective = "binary:logistic")
 #'
-#' #agaricus.test$data@@Dimnames[[2]] represents the column names of the sparse matrix.
+#' # agaricus.train$data@@Dimnames[[2]] represents the column names of the sparse matrix.
 #' xgb.plot.tree(feature_names = agaricus.train$data@@Dimnames[[2]], model = bst)
 #'
 #' @export

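One more illustrative sketch, complementing the example above: the n_first_tree parameter documented in this file, with the signature confirmed by the \usage block of xgb.plot.tree below.

    # Restrict rendering to the first tree only; useful for huge models,
    # where plotting every tree is slow.
    xgb.plot.tree(feature_names = agaricus.train$data@Dimnames[[2]],
                  model = bst, n_first_tree = 1)
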
@@ -8,9 +8,9 @@ xgb.importance(feature_names = NULL, model = NULL, data = NULL,
   label = NULL, target = function(x) ((x + label) == 2))
 }
 \arguments{
-\item{feature_names}{names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
+\item{feature_names}{names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
 
-\item{model}{generated by the \code{xgb.train} function. Avoid the creation of a dump file.}
+\item{model}{generated by the \code{xgb.train} function.}
 
 \item{data}{the dataset used for the training step. Will be used with the \code{label} parameter for co-occurrence computation. More information in the \code{Details} section. This parameter is optional.}
 
@@ -25,8 +25,6 @@ A \code{data.table} of the features used in the model with their average gain (a
 Create a \code{data.table} of the most important features of a model.
 }
 \details{
-This is the function to understand the model trained (and through your model, your data).
-
 This function is for both linear and tree models.
 
 \code{data.table} is returned by the function.

@@ -7,7 +7,7 @@
 xgb.plot.deepness(model = NULL)
 }
 \arguments{
-\item{model}{dump generated by the \code{xgb.train} function. Avoid the creation of a dump file.}
+\item{model}{dump generated by the \code{xgb.train} function.}
 }
 \value{
 Two graphs showing the distribution of the model deepness.
@@ -28,7 +28,7 @@ The graph is made of two parts:
 
 \itemize{
     \item Count: number of leaves per level of deepness;
-    \item Weighted cover: noramlized weighted cover per Leaf (weighted number of instances).
+    \item Weighted cover: normalized weighted cover per leaf (weighted number of instances).
 }
 
 This function is inspired by the blog post \url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}

@@ -8,9 +8,9 @@ xgb.plot.multi.trees(model, feature_names = NULL, features.keep = 5,
   plot.width = NULL, plot.height = NULL)
 }
 \arguments{
-\item{model}{dump generated by the \code{xgb.train} function. Avoid the creation of a dump file.}
+\item{model}{dump generated by the \code{xgb.train} function.}
 
-\item{feature_names}{names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
+\item{feature_names}{names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
 
 \item{features.keep}{number of features to keep in each position of the multi trees.}
 
@@ -8,7 +8,7 @@ xgb.plot.tree(feature_names = NULL, model = NULL, n_first_tree = NULL,
   plot.width = NULL, plot.height = NULL)
 }
 \arguments{
-\item{feature_names}{names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
+\item{feature_names}{names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
 
 \item{model}{generated by the \code{xgb.train} function. Avoid the creation of a dump file.}
 
@@ -22,32 +22,26 @@ xgb.plot.tree(feature_names = NULL, model = NULL, n_first_tree = NULL,
 A \code{DiagrammeR} of the model.
 }
 \description{
-Read a tree model text dump.
-Plotting only works for boosted tree model (not linear model).
+Read a tree model text dump and plot the model.
 }
 \details{
 The content of each node is organised that way:
 
 \itemize{
-  \item \code{feature} value ;
-  \item \code{cover}: the sum of second order gradient of training data classified to the leaf, if it is square loss, this simply corresponds to the number of instances in that branch. Deeper in the tree a node is, lower this metric will be ;
+  \item \code{feature} value;
+  \item \code{cover}: the sum of second order gradients of training data classified to the leaf; if it is square loss, this simply corresponds to the number of instances in that branch. The deeper in the tree a node is, the lower this metric will be;
   \item \code{gain}: metric measuring the importance of the node in the model.
 }
 
 Each branch finishes with a leaf. For each leaf, only the \code{cover} is indicated.
-It uses \href{http://www.graphviz.org/}{GraphViz} library for that purpose.
+The function uses the \href{http://www.graphviz.org/}{GraphViz} library for that purpose.
 }
 \examples{
 data(agaricus.train, package='xgboost')
-
-#Both dataset are list with two items, a sparse matrix and labels
-#(labels = outcome column which will be learned).
-#Each column of the sparse Matrix is a feature in one hot encoding format.
-
 bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 2,
                eta = 1, nthread = 2, nround = 2, objective = "binary:logistic")
 
-#agaricus.test$data@Dimnames[[2]] represents the column names of the sparse matrix.
+# agaricus.train$data@Dimnames[[2]] represents the column names of the sparse matrix.
 xgb.plot.tree(feature_names = agaricus.train$data@Dimnames[[2]], model = bst)
 
 }