diff --git a/R-package/R/xgb.plot.deepness.R b/R-package/R/xgb.plot.deepness.R
index 2a20532f6..0efd783ac 100644
--- a/R-package/R/xgb.plot.deepness.R
+++ b/R-package/R/xgb.plot.deepness.R
@@ -69,7 +69,7 @@ get.paths.to.leaf <- function(dt.tree) {
 #' @importFrom data.table setnames
 #' @importFrom data.table :=
 #' @importFrom magrittr %>%
-#' @param model dump generated by the \code{xgb.train} function. Avoid the creation of a dump file.
+#' @param model dump generated by the \code{xgb.train} function.
 #'
 #' @return Two graphs showing the distribution of the model deepness.
 #'
@@ -86,7 +86,7 @@ get.paths.to.leaf <- function(dt.tree) {
 #'
 #' \itemize{
 #'   \item Count: number of leaf per level of deepness;
-#'   \item Weighted cover: noramlized weighted cover per Leaf (weighted number of instances).
+#'   \item Weighted cover: normalized weighted cover per leaf (weighted number of instances).
 #' }
 #'
 #' This function is inspired by the blog post \url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}
diff --git a/R-package/R/xgb.plot.multi.trees.R b/R-package/R/xgb.plot.multi.trees.R
index f140a959f..c61cb8cd4 100644
--- a/R-package/R/xgb.plot.multi.trees.R
+++ b/R-package/R/xgb.plot.multi.trees.R
@@ -10,8 +10,8 @@
 #' @importFrom stringr str_detect
 #' @importFrom stringr str_extract
 #'
-#' @param model dump generated by the \code{xgb.train} function. Avoid the creation of a dump file.
-#' @param feature_names names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.
+#' @param model dump generated by the \code{xgb.train} function.
+#' @param feature_names names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If the model dump already contains feature names, this argument should be \code{NULL}.
 #' @param features.keep number of features to keep in each position of the multi trees.
 #' @param plot.width width in pixels of the graph to produce
 #' @param plot.height height in pixels of the graph to produce
diff --git a/R-package/R/xgb.plot.tree.R b/R-package/R/xgb.plot.tree.R
index 59822ec83..3d9d55c9f 100644
--- a/R-package/R/xgb.plot.tree.R
+++ b/R-package/R/xgb.plot.tree.R
@@ -1,12 +1,11 @@
 #' Plot a boosted tree model
 #'
-#' Read a tree model text dump.
-#' Plotting only works for boosted tree model (not linear model).
+#' Read a tree model text dump and plot the model.
 #'
 #' @importFrom data.table data.table
 #' @importFrom data.table :=
 #' @importFrom magrittr %>%
-#' @param feature_names names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.
+#' @param feature_names names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If the model dump already contains feature names, this argument should be \code{NULL}.
 #' @param model generated by the \code{xgb.train} function. Avoid the creation of a dump file.
 #' @param n_first_tree limit the plot to the n first trees. If \code{NULL}, all trees of the model are plotted. Performance can be low for huge models.
 #' @param plot.width the width of the diagram in pixels.
@@ -19,25 +18,20 @@
 #' The content of each node is organised that way:
 #'
 #' \itemize{
-#'  \item \code{feature} value ;
-#'  \item \code{cover}: the sum of second order gradient of training data classified to the leaf, if it is square loss, this simply corresponds to the number of instances in that branch. Deeper in the tree a node is, lower this metric will be ;
+#'  \item \code{feature} value;
+#'  \item \code{cover}: the sum of the second order gradients of the training data classified to the leaf; for square loss, this simply corresponds to the number of instances in that branch. The deeper a node is in the tree, the lower this metric will be;
 #'  \item \code{gain}: metric the importance of the node in the model.
 #' }
 #'
-#' Each branch finishes with a leaf. For each leaf, only the \code{cover} is indicated.
-#' It uses \href{http://www.graphviz.org/}{GraphViz} library for that purpose.
+#' The function uses the \href{http://www.graphviz.org/}{GraphViz} library for that purpose.
 #'
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #'
-#' #Both dataset are list with two items, a sparse matrix and labels
-#' #(labels = outcome column which will be learned).
-#' #Each column of the sparse Matrix is a feature in one hot encoding format.
-#'
 #' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 2,
 #'                eta = 1, nthread = 2, nround = 2,objective = "binary:logistic")
 #'
-#' #agaricus.test$data@@Dimnames[[2]] represents the column names of the sparse matrix.
+#' # agaricus.train$data@@Dimnames[[2]] represents the column names of the sparse matrix.
 #' xgb.plot.tree(feature_names = agaricus.train$data@@Dimnames[[2]], model = bst)
 #'
 #' @export
diff --git a/R-package/man/xgb.plot.deepness.Rd b/R-package/man/xgb.plot.deepness.Rd
index c8ed130e2..e11a7495e 100644
--- a/R-package/man/xgb.plot.deepness.Rd
+++ b/R-package/man/xgb.plot.deepness.Rd
@@ -7,7 +7,7 @@
 xgb.plot.deepness(model = NULL)
 }
 \arguments{
-\item{model}{dump generated by the \code{xgb.train} function. Avoid the creation of a dump file.}
+\item{model}{dump generated by the \code{xgb.train} function.}
 }
 \value{
 Two graphs showing the distribution of the model deepness.
@@ -28,7 +28,7 @@ The graph is made of two parts:
 
 \itemize{
   \item Count: number of leaf per level of deepness;
-  \item Weighted cover: noramlized weighted cover per Leaf (weighted number of instances).
+  \item Weighted cover: normalized weighted cover per leaf (weighted number of instances).
 }
 
 This function is inspired by the blog post \url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}
diff --git a/R-package/man/xgb.plot.multi.trees.Rd b/R-package/man/xgb.plot.multi.trees.Rd
index 2d0a1d3e8..4d97c58b4 100644
--- a/R-package/man/xgb.plot.multi.trees.Rd
+++ b/R-package/man/xgb.plot.multi.trees.Rd
@@ -8,9 +8,9 @@
 xgb.plot.multi.trees(model, feature_names = NULL, features.keep = 5,
   plot.width = NULL, plot.height = NULL)
 }
 \arguments{
-\item{model}{dump generated by the \code{xgb.train} function. Avoid the creation of a dump file.}
+\item{model}{dump generated by the \code{xgb.train} function.}
 
-\item{feature_names}{names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
+\item{feature_names}{names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If the model dump already contains feature names, this argument should be \code{NULL}.}
 
 \item{features.keep}{number of features to keep in each position of the multi trees.}
diff --git a/R-package/man/xgb.plot.tree.Rd b/R-package/man/xgb.plot.tree.Rd
index 164b013c1..c087059e0 100644
--- a/R-package/man/xgb.plot.tree.Rd
+++ b/R-package/man/xgb.plot.tree.Rd
@@ -8,7 +8,7 @@
 xgb.plot.tree(feature_names = NULL, model = NULL, n_first_tree = NULL,
   plot.width = NULL, plot.height = NULL)
 }
 \arguments{
-\item{feature_names}{names of each feature as a character vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
+\item{feature_names}{names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If the model dump already contains feature names, this argument should be \code{NULL}.}
 
 \item{model}{generated by the \code{xgb.train} function. Avoid the creation of a dump file.}
 
@@ -22,32 +22,26 @@ xgb.plot.tree(feature_names = NULL, model = NULL, n_first_tree = NULL,
 \value{
 A \code{DiagrammeR} of the model.
 }
 \description{
-Read a tree model text dump.
-Plotting only works for boosted tree model (not linear model).
+Read a tree model text dump and plot the model.
 }
 \details{
 The content of each node is organised that way:
 
 \itemize{
-  \item \code{feature} value ;
-  \item \code{cover}: the sum of second order gradient of training data classified to the leaf, if it is square loss, this simply corresponds to the number of instances in that branch. Deeper in the tree a node is, lower this metric will be ;
+  \item \code{feature} value;
+  \item \code{cover}: the sum of the second order gradients of the training data classified to the leaf; for square loss, this simply corresponds to the number of instances in that branch. The deeper a node is in the tree, the lower this metric will be;
   \item \code{gain}: metric the importance of the node in the model.
 }
 
-Each branch finishes with a leaf. For each leaf, only the \code{cover} is indicated.
-It uses \href{http://www.graphviz.org/}{GraphViz} library for that purpose.
+The function uses the \href{http://www.graphviz.org/}{GraphViz} library for that purpose.
 }
 \examples{
 data(agaricus.train, package='xgboost')
 
-#Both dataset are list with two items, a sparse matrix and labels
-#(labels = outcome column which will be learned).
-#Each column of the sparse Matrix is a feature in one hot encoding format.
-
 bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max.depth = 2,
                eta = 1, nthread = 2, nround = 2,objective = "binary:logistic")
 
-#agaricus.test$data@Dimnames[[2]] represents the column names of the sparse matrix.
+# agaricus.train$data@Dimnames[[2]] represents the column names of the sparse matrix.
 xgb.plot.tree(feature_names = agaricus.train$data@Dimnames[[2]], model = bst)
 }
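
Reviewer note: a minimal smoke-test script for the three plotting functions touched by this patch, assembled from the example in the patched documentation. The xgb.plot.tree call is taken verbatim from the docs; the xgb.plot.multi.trees and xgb.plot.deepness calls are inferred from the \usage blocks in the .Rd files above and have not been verified against this revision.

    library(xgboost)

    # Train the small example model used throughout the patched documentation.
    data(agaricus.train, package = 'xgboost')
    bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
                   max.depth = 2, eta = 1, nthread = 2, nround = 2,
                   objective = "binary:logistic")

    # Plot a single tree; the sparse matrix column names serve as feature names.
    xgb.plot.tree(feature_names = agaricus.train$data@Dimnames[[2]], model = bst)

    # Condense all trees into one summary tree, keeping 5 features per position
    # (signature inferred from xgb.plot.multi.trees.Rd above).
    xgb.plot.multi.trees(model = bst,
                         feature_names = agaricus.train$data@Dimnames[[2]],
                         features.keep = 5)

    # Show leaf counts and normalized weighted cover by level of deepness
    # (signature inferred from xgb.plot.deepness.Rd above).
    xgb.plot.deepness(model = bst)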