[CI] Improve R linter script (#5944)
* [CI] Move lint to a separate script
* [CI] Improved lintr launcher
* Add lintr as a separate action
* Add custom parsing logic to print out logs
* Fix lintr issues in demos
* Run R demos
* Fix CRAN checks
* Install XGBoost into R env before running lintr
* Install devtools (needed to run demos)
parent 8943eb4314
commit 5879acde9a
.github/workflows/main.yml (51 lines changed)
@@ -6,6 +6,9 @@ name: XGBoost-CI
 # events but only for the master branch
 on: [push, pull_request]
 
+env:
+  R_PACKAGES: c('XML', 'igraph', 'data.table', 'magrittr', 'stringi', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools')
+
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
   test-with-jvm:
@@ -38,6 +41,49 @@ jobs:
           mvn test -pl :xgboost4j_2.12
 
+  lintr:
+    runs-on: ${{ matrix.config.os }}
+    name: Run R linters on OS ${{ matrix.config.os }}, R ${{ matrix.config.r }}, Compiler ${{ matrix.config.compiler }}, Build ${{ matrix.config.build }}
+    strategy:
+      matrix:
+        config:
+          - {os: windows-latest, r: 'release', compiler: 'mingw', build: 'autotools'}
+    env:
+      R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
+      RSPM: ${{ matrix.config.rspm }}
+
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: 'true'
+
+      - uses: r-lib/actions/setup-r@master
+        with:
+          r-version: ${{ matrix.config.r }}
+
+      - name: Cache R packages
+        uses: actions/cache@v2
+        with:
+          path: ${{ env.R_LIBS_USER }}
+          key: ${{ runner.os }}-r-${{ matrix.config.r }}-1-${{ hashFiles('R-package/DESCRIPTION') }}
+          restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-2-
+
+      - name: Install dependencies
+        shell: Rscript {0}
+        run: |
+          install.packages(${{ env.R_PACKAGES }},
+                           repos = 'http://cloud.r-project.org',
+                           dependencies = c('Depends', 'Imports', 'LinkingTo'))
+
+      - name: Run lintr
+        run: |
+          cd R-package
+          R.exe CMD INSTALL .
+          Rscript.exe tests/run_lint.R
+
 
   test-with-R:
     runs-on: ${{ matrix.config.os }}
@@ -78,8 +124,9 @@ jobs:
       - name: Install dependencies
         shell: Rscript {0}
        run: |
-          install.packages(c('XML','igraph'))
-          install.packages(c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','knitr','rmarkdown'))
+          install.packages(${{ env.R_PACKAGES }},
+                           repos = 'http://cloud.r-project.org',
+                           dependencies = c('Depends', 'Imports', 'LinkingTo'))
 
       - uses: actions/setup-python@v2
         with:
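Note: the new "Run lintr" step above boils down to installing the R package and then executing the lint script. A rough local equivalent, sketched here as an R session started at the repository root, could look like the following (the Windows CI runner invokes the same commands via R.exe / Rscript.exe; this sketch is an illustration, not part of the commit):

    # Rough local equivalent of the "Run lintr" CI step (illustration only).
    setwd("R-package")
    install.packages(".", repos = NULL, type = "source")  # same effect as `R CMD INSTALL .`
    status <- system("Rscript tests/run_lint.R")           # run_lint.R exits with status 1 if issues are found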
@@ -54,7 +54,8 @@ Suggests:
     lintr,
     igraph (>= 1.0.1),
     jsonlite,
-    float
+    float,
+    crayon
 Depends:
     R (>= 3.3.0)
 Imports:
@@ -17,4 +17,4 @@ Benchmarks
 Notes
 ====
 * Contribution of examples, benchmarks is more than welcomed!
-* If you like to share how you use xgboost to solve your problem, send a pull request:)
+* If you like to share how you use xgboost to solve your problem, send a pull request :)
@@ -3,8 +3,8 @@ require(methods)
 
 # we load in the agaricus dataset
 # In this example, we are aiming to predict whether a mushroom is edible
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
+data(agaricus.train, package = 'xgboost')
+data(agaricus.test, package = 'xgboost')
 train <- agaricus.train
 test <- agaricus.test
 # the loaded data is stored in sparseMatrix, and label is a numeric vector in {0,1}
@@ -58,31 +58,31 @@ xgb.save(bst, "xgboost.model")
 bst2 <- xgb.load("xgboost.model")
 pred2 <- predict(bst2, test$data)
 # pred2 should be identical to pred
-print(paste("sum(abs(pred2-pred))=", sum(abs(pred2-pred))))
+print(paste("sum(abs(pred2-pred))=", sum(abs(pred2 - pred))))
 
 # save model to R's raw vector
-raw = xgb.save.raw(bst)
+raw <- xgb.save.raw(bst)
 # load binary model to R
 bst3 <- xgb.load(raw)
 pred3 <- predict(bst3, test$data)
 # pred3 should be identical to pred
-print(paste("sum(abs(pred3-pred))=", sum(abs(pred3-pred))))
+print(paste("sum(abs(pred3-pred))=", sum(abs(pred3 - pred))))
 
 #----------------Advanced features --------------
 # to use advanced features, we need to put data in xgb.DMatrix
-dtrain <- xgb.DMatrix(data = train$data, label=train$label)
-dtest <- xgb.DMatrix(data = test$data, label=test$label)
+dtrain <- xgb.DMatrix(data = train$data, label = train$label)
+dtest <- xgb.DMatrix(data = test$data, label = test$label)
 #---------------Using watchlist----------------
 # watchlist is a list of xgb.DMatrix, each of them is tagged with name
-watchlist <- list(train=dtrain, test=dtest)
+watchlist <- list(train = dtrain, test = dtest)
 # to train with watchlist, use xgb.train, which contains more advanced features
 # watchlist allows us to monitor the evaluation result on all data in the list
 print("Train xgboost using xgb.train with watchlist")
-bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nrounds=2, watchlist=watchlist,
+bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
                  nthread = 2, objective = "binary:logistic")
 # we can change evaluation metrics, or use multiple evaluation metrics
 print("train xgboost using xgb.train with watchlist, watch logloss and error")
-bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nrounds=2, watchlist=watchlist,
+bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
                  eval_metric = "error", eval_metric = "logloss",
                  nthread = 2, objective = "binary:logistic")
@@ -90,16 +90,16 @@ bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nrounds=2, watchlist=watchlist
 xgb.DMatrix.save(dtrain, "dtrain.buffer")
 # to load it in, simply call xgb.DMatrix
 dtrain2 <- xgb.DMatrix("dtrain.buffer")
-bst <- xgb.train(data=dtrain2, max_depth=2, eta=1, nrounds=2, watchlist=watchlist,
+bst <- xgb.train(data = dtrain2, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
                  nthread = 2, objective = "binary:logistic")
 # information can be extracted from xgb.DMatrix using getinfo
-label = getinfo(dtest, "label")
+label <- getinfo(dtest, "label")
 pred <- predict(bst, dtest)
-err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label)
+err <- as.numeric(sum(as.integer(pred > 0.5) != label)) / length(label)
 print(paste("test-error=", err))
 
 # You can dump the tree you learned using xgb.dump into a text file
-dump_path = file.path(tempdir(), 'dump.raw.txt')
+dump_path <- file.path(tempdir(), 'dump.raw.txt')
 xgb.dump(bst, dump_path, with_stats = TRUE)
 
 # Finally, you can check which features are the most important.
@@ -1,7 +1,7 @@
 require(xgboost)
 # load in the agaricus dataset
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
+data(agaricus.train, package = 'xgboost')
+data(agaricus.test, package = 'xgboost')
 dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
 dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
@@ -11,12 +11,12 @@ watchlist <- list(eval = dtest, train = dtrain)
 #
 print('start running example to start from a initial prediction')
 # train xgboost for 1 round
-param <- list(max_depth=2, eta=1, nthread = 2, objective='binary:logistic')
+param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')
 bst <- xgb.train(param, dtrain, 1, watchlist)
 # Note: we need the margin value instead of transformed prediction in set_base_margin
 # do predict with output_margin=TRUE, will always give you margin values before logistic transformation
-ptrain <- predict(bst, dtrain, outputmargin=TRUE)
-ptest <- predict(bst, dtest, outputmargin=TRUE)
+ptrain <- predict(bst, dtrain, outputmargin = TRUE)
+ptest <- predict(bst, dtest, outputmargin = TRUE)
 # set the base_margin property of dtrain and dtest
 # base margin is the base prediction we will boost from
 setinfo(dtrain, "base_margin", ptrain)
@@ -13,13 +13,13 @@ df <- data.table(Arthritis, keep.rownames = FALSE)
 
 # Let's add some new categorical features to see if it helps. Of course these feature are highly correlated to the Age feature. Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features, even in case of highly correlated features.
 # For the first feature we create groups of age by rounding the real age. Note that we transform it to factor (categorical data) so the algorithm treat them as independant values.
-df[,AgeDiscret:= as.factor(round(Age/10,0))]
+df[, AgeDiscret := as.factor(round(Age / 10, 0))]
 
 # Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I choose this value based on nothing. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
-df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))]
+df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
 
 # We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
-df[,ID:=NULL]
+df[, ID := NULL]
 
 #-------------Basic Training using XGBoost in caret Library-----------------
 # Set up control parameters for caret::train
@@ -32,17 +32,17 @@ str(df)
 # Let's add some new categorical features to see if it helps. Of course these feature are highly correlated to the Age feature. Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features, even in case of highly correlated features.
 
 # For the first feature we create groups of age by rounding the real age. Note that we transform it to factor (categorical data) so the algorithm treat them as independant values.
-df[,AgeDiscret:= as.factor(round(Age/10,0))]
+df[, AgeDiscret := as.factor(round(Age / 10, 0))]
 
 # Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I choose this value based on nothing. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
-df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))]
+df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
 
 # We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
-df[,ID:=NULL]
+df[, ID := NULL]
 
 # List the different values for the column Treatment: Placebo, Treated.
 cat("Values of the categorical feature Treatment\n")
-print(levels(df[,Treatment]))
+print(levels(df[, Treatment]))
 
 # Next step, we will transform the categorical data to dummy variables.
 # This method is also called one hot encoding.
@@ -52,7 +52,7 @@ print(levels(df[,Treatment]))
 #
 # Formulae Improved~.-1 used below means transform all categorical features but column Improved to binary values.
 # Column Improved is excluded because it will be our output column, the one we want to predict.
-sparse_matrix = sparse.model.matrix(Improved~.-1, data = df)
+sparse_matrix <- sparse.model.matrix(Improved ~ . - 1, data = df)
 
 cat("Encoding of the sparse Matrix\n")
 print(sparse_matrix)
@@ -61,7 +61,7 @@ print(sparse_matrix)
 # 1. Set, for all rows, field in Y column to 0;
 # 2. set Y to 1 when Improved == Marked;
 # 3. Return Y column
-output_vector = df[,Y:=0][Improved == "Marked",Y:=1][,Y]
+output_vector <- df[, Y := 0][Improved == "Marked", Y := 1][, Y]
 
 # Following is the same process as other demo
 cat("Learning...\n")
@@ -1,25 +1,25 @@
 require(xgboost)
 # load in the agaricus dataset
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
+data(agaricus.train, package = 'xgboost')
+data(agaricus.test, package = 'xgboost')
 dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
 dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
 
 nrounds <- 2
-param <- list(max_depth=2, eta=1, nthread=2, objective='binary:logistic')
+param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')
 
 cat('running cross validation\n')
 # do cross validation, this will print result out as
 # [iteration] metric_name:mean_value+std_value
 # std_value is standard deviation of the metric
-xgb.cv(param, dtrain, nrounds, nfold=5, metrics={'error'})
+xgb.cv(param, dtrain, nrounds, nfold = 5, metrics = {'error'})
 
 cat('running cross validation, disable standard deviation display\n')
 # do cross validation, this will print result out as
 # [iteration] metric_name:mean_value+std_value
 # std_value is standard deviation of the metric
-xgb.cv(param, dtrain, nrounds, nfold=5,
-       metrics='error', showsd = FALSE)
+xgb.cv(param, dtrain, nrounds, nfold = 5,
+       metrics = 'error', showsd = FALSE)
 
 ###
 # you can also do cross validation with cutomized loss function
@@ -29,18 +29,18 @@ print ('running cross validation, with cutomsized loss function')
 
 logregobj <- function(preds, dtrain) {
   labels <- getinfo(dtrain, "label")
-  preds <- 1/(1 + exp(-preds))
+  preds <- 1 / (1 + exp(-preds))
   grad <- preds - labels
   hess <- preds * (1 - preds)
   return(list(grad = grad, hess = hess))
 }
 evalerror <- function(preds, dtrain) {
   labels <- getinfo(dtrain, "label")
-  err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
+  err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
   return(list(metric = "error", value = err))
 }
 
-param <- list(max_depth=2, eta=1,
+param <- list(max_depth = 2, eta = 1,
               objective = logregobj, eval_metric = evalerror)
 # train with customized objective
 xgb.cv(params = param, data = dtrain, nrounds = nrounds, nfold = 5)
@@ -1,7 +1,7 @@
 require(xgboost)
 # load in the agaricus dataset
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
+data(agaricus.train, package = 'xgboost')
+data(agaricus.test, package = 'xgboost')
 dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
 dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
@@ -15,7 +15,7 @@ num_round <- 2
 # this is loglikelihood loss
 logregobj <- function(preds, dtrain) {
   labels <- getinfo(dtrain, "label")
-  preds <- 1/(1 + exp(-preds))
+  preds <- 1 / (1 + exp(-preds))
   grad <- preds - labels
   hess <- preds * (1 - preds)
   return(list(grad = grad, hess = hess))
@@ -29,12 +29,12 @@ logregobj <- function(preds, dtrain) {
 # Take this in mind when you use the customization, and maybe you need write customized evaluation function
 evalerror <- function(preds, dtrain) {
   labels <- getinfo(dtrain, "label")
-  err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
+  err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
   return(list(metric = "error", value = err))
 }
 
-param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
-              objective=logregobj, eval_metric=evalerror)
+param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0,
+              objective = logregobj, eval_metric = evalerror)
 print ('start training with user customized objective')
 # training with customized objective, we can also do step by step training
 # simply look at xgboost.py's implementation of train
@@ -52,13 +52,13 @@ attr(dtrain, 'label') <- getinfo(dtrain, 'label')
 logregobjattr <- function(preds, dtrain) {
   # now you can access the attribute in customized function
   labels <- attr(dtrain, 'label')
-  preds <- 1/(1 + exp(-preds))
+  preds <- 1 / (1 + exp(-preds))
   grad <- preds - labels
   hess <- preds * (1 - preds)
   return(list(grad = grad, hess = hess))
 }
-param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
-              objective=logregobjattr, eval_metric=evalerror)
+param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0,
+              objective = logregobjattr, eval_metric = evalerror)
 print ('start training with user customized objective, with additional attributes in DMatrix')
 # training with customized objective, we can also do step by step training
 # simply look at xgboost.py's implementation of train
@@ -1,20 +1,20 @@
 require(xgboost)
 # load in the agaricus dataset
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
+data(agaricus.train, package = 'xgboost')
+data(agaricus.test, package = 'xgboost')
 dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
 dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
 # note: for customized objective function, we leave objective as default
 # note: what we are getting is margin value in prediction
 # you must know what you are doing
-param <- list(max_depth=2, eta=1, nthread=2, verbosity=0)
+param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0)
 watchlist <- list(eval = dtest)
 num_round <- 20
 # user define objective function, given prediction, return gradient and second order gradient
 # this is loglikelihood loss
 logregobj <- function(preds, dtrain) {
   labels <- getinfo(dtrain, "label")
-  preds <- 1/(1 + exp(-preds))
+  preds <- 1 / (1 + exp(-preds))
   grad <- preds - labels
   hess <- preds * (1 - preds)
   return(list(grad = grad, hess = hess))
@@ -27,7 +27,7 @@ logregobj <- function(preds, dtrain) {
 # Take this in mind when you use the customization, and maybe you need write customized evaluation function
 evalerror <- function(preds, dtrain) {
   labels <- getinfo(dtrain, "label")
-  err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
+  err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
   return(list(metric = "error", value = err))
 }
 print ('start training with early Stopping setting')
@@ -1,7 +1,7 @@
 require(xgboost)
 # load in the agaricus dataset
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
+data(agaricus.train, package = 'xgboost')
+data(agaricus.test, package = 'xgboost')
 dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
 dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
 ##
@@ -30,5 +30,4 @@ num_round <- 2
 bst <- xgb.train(param, dtrain, num_round, watchlist)
 ypred <- predict(bst, dtest)
 labels <- getinfo(dtest, 'label')
-cat('error of preds=', mean(as.numeric(ypred>0.5)!=labels),'\n')
-
+cat('error of preds=', mean(as.numeric(ypred > 0.5) != labels), '\n')
@@ -21,8 +21,8 @@ m <- X[, sel] %*% betas - 1 + rnorm(N)
 y <- rbinom(N, 1, plogis(m))
 
 tr <- sample.int(N, N * 0.75)
-dtrain <- xgb.DMatrix(X[tr,], label = y[tr])
-dtest <- xgb.DMatrix(X[-tr,], label = y[-tr])
+dtrain <- xgb.DMatrix(X[tr, ], label = y[tr])
+dtest <- xgb.DMatrix(X[-tr, ], label = y[-tr])
 wl <- list(train = dtrain, test = dtest)
 
 # An example of running 'gpu_hist' algorithm
@@ -4,33 +4,38 @@ library(data.table)
 set.seed(1024)
 
 # Function to obtain a list of interactions fitted in trees, requires input of maximum depth
-treeInteractions <- function(input_tree, input_max_depth){
-  trees <- copy(input_tree) # copy tree input to prevent overwriting
+treeInteractions <- function(input_tree, input_max_depth) {
+  ID_merge <- i.id <- i.feature <- NULL # Suppress warning "no visible binding for global variable"
+
+  trees <- data.table::copy(input_tree) # copy tree input to prevent overwriting
   if (input_max_depth < 2) return(list()) # no interactions if max depth < 2
   if (nrow(input_tree) == 1) return(list())
 
   # Attach parent nodes
-  for (i in 2:input_max_depth){
-    if (i == 2) trees[, ID_merge:=ID] else trees[, ID_merge:=get(paste0('parent_',i-2))]
-    parents_left <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=Yes)]
-    parents_right <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=No)]
+  for (i in 2:input_max_depth) {
+    if (i == 2) trees[, ID_merge := ID] else trees[, ID_merge := get(paste0('parent_', i - 2))]
+    parents_left <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = Yes)]
+    parents_right <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = No)]
 
-    setorderv(trees, 'ID_merge')
-    setorderv(parents_left, 'ID_merge')
-    setorderv(parents_right, 'ID_merge')
+    data.table::setorderv(trees, 'ID_merge')
+    data.table::setorderv(parents_left, 'ID_merge')
+    data.table::setorderv(parents_right, 'ID_merge')
 
-    trees <- merge(trees, parents_left, by='ID_merge', all.x=TRUE)
-    trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
-    trees[, c('i.id','i.feature'):=NULL]
+    trees <- merge(trees, parents_left, by = 'ID_merge', all.x = TRUE)
+    trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
+          := list(i.id, i.feature)]
+    trees[, c('i.id', 'i.feature') := NULL]
 
-    trees <- merge(trees, parents_right, by='ID_merge', all.x=TRUE)
-    trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
-    trees[, c('i.id','i.feature'):=NULL]
+    trees <- merge(trees, parents_right, by = 'ID_merge', all.x = TRUE)
+    trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
+          := list(i.id, i.feature)]
+    trees[, c('i.id', 'i.feature') := NULL]
   }
 
   # Extract nodes with interactions
   interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
-                             c('Feature',paste0('parent_feat_',1:(input_max_depth-1))), with=FALSE]
+                             c('Feature', paste0('parent_feat_', 1:(input_max_depth - 1))),
+                             with = FALSE]
   interaction_trees_split <- split(interaction_trees, 1:nrow(interaction_trees))
   interaction_list <- lapply(interaction_trees_split, as.character)
@@ -47,56 +52,59 @@ treeInteractions <- function(input_tree, input_max_depth){
 
 # Generate sample data
 x <- list()
-for (i in 1:10){
-  x[[i]] = i*rnorm(1000, 10)
+for (i in 1:10) {
+  x[[i]] <- i * rnorm(1000, 10)
 }
 x <- as.data.table(x)
 
-y = -1*x[, rowSums(.SD)] + x[['V1']]*x[['V2']] + x[['V3']]*x[['V4']]*x[['V5']] + rnorm(1000, 0.001) + 3*sin(x[['V7']])
+y <- -1 * x[, rowSums(.SD)] + x[['V1']] * x[['V2']] + x[['V3']] * x[['V4']] * x[['V5']]
+  + rnorm(1000, 0.001) + 3 * sin(x[['V7']])
 
-train = as.matrix(x)
+train <- as.matrix(x)
 
 # Interaction constraint list (column names form)
-interaction_list <- list(c('V1','V2'),c('V3','V4','V5'))
+interaction_list <- list(c('V1', 'V2'), c('V3', 'V4', 'V5'))
 
 # Convert interaction constraint list into feature index form
 cols2ids <- function(object, col_names) {
   LUT <- seq_along(col_names) - 1
   names(LUT) <- col_names
-  rapply(object, function(x) LUT[x], classes="character", how="replace")
+  rapply(object, function(x) LUT[x], classes = "character", how = "replace")
 }
-interaction_list_fid = cols2ids(interaction_list, colnames(train))
+interaction_list_fid <- cols2ids(interaction_list, colnames(train))
 
 # Fit model with interaction constraints
-bst = xgboost(data = train, label = y, max_depth = 4,
+bst <- xgboost(data = train, label = y, max_depth = 4,
               eta = 0.1, nthread = 2, nrounds = 1000,
               interaction_constraints = interaction_list_fid)
 
 bst_tree <- xgb.model.dt.tree(colnames(train), bst)
-bst_interactions <- treeInteractions(bst_tree, 4) # interactions constrained to combinations of V1*V2 and V3*V4*V5
+bst_interactions <- treeInteractions(bst_tree, 4)
+# interactions constrained to combinations of V1*V2 and V3*V4*V5
 
 # Fit model without interaction constraints
-bst2 = xgboost(data = train, label = y, max_depth = 4,
+bst2 <- xgboost(data = train, label = y, max_depth = 4,
               eta = 0.1, nthread = 2, nrounds = 1000)
 
 bst2_tree <- xgb.model.dt.tree(colnames(train), bst2)
 bst2_interactions <- treeInteractions(bst2_tree, 4) # much more interactions
 
 # Fit model with both interaction and monotonicity constraints
-bst3 = xgboost(data = train, label = y, max_depth = 4,
+bst3 <- xgboost(data = train, label = y, max_depth = 4,
               eta = 0.1, nthread = 2, nrounds = 1000,
               interaction_constraints = interaction_list_fid,
-              monotone_constraints = c(-1,0,0,0,0,0,0,0,0,0))
+              monotone_constraints = c(-1, 0, 0, 0, 0, 0, 0, 0, 0, 0))
 
 bst3_tree <- xgb.model.dt.tree(colnames(train), bst3)
-bst3_interactions <- treeInteractions(bst3_tree, 4) # interactions still constrained to combinations of V1*V2 and V3*V4*V5
+bst3_interactions <- treeInteractions(bst3_tree, 4)
+# interactions still constrained to combinations of V1*V2 and V3*V4*V5
 
 # Show monotonic constraints still apply by checking scores after incrementing V1
 x1 <- sort(unique(x[['V1']]))
 for (i in 1:length(x1)){
   testdata <- copy(x[, -c('V1')])
   testdata[['V1']] <- x1[i]
-  testdata <- testdata[, paste0('V',1:10), with=FALSE]
+  testdata <- testdata[, paste0('V', 1:10), with = FALSE]
   pred <- predict(bst3, as.matrix(testdata))
 
   # Should not print out anything due to monotonic constraints
@@ -1,7 +1,6 @@
 data(mtcars)
 head(mtcars)
-bst = xgboost(data=as.matrix(mtcars[,-11]),label=mtcars[,11],
-              objective='count:poisson',nrounds=5)
-pred = predict(bst,as.matrix(mtcars[,-11]))
-sqrt(mean((pred-mtcars[,11])^2))
-
+bst <- xgboost(data = as.matrix(mtcars[, -11]), label = mtcars[, 11],
+               objective = 'count:poisson', nrounds = 5)
+pred <- predict(bst, as.matrix(mtcars[, -11]))
+sqrt(mean((pred - mtcars[, 11]) ^ 2))
@@ -1,23 +1,23 @@
 require(xgboost)
 # load in the agaricus dataset
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
+data(agaricus.train, package = 'xgboost')
+data(agaricus.test, package = 'xgboost')
 dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
 dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
 
-param <- list(max_depth=2, eta=1, objective='binary:logistic')
+param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
 watchlist <- list(eval = dtest, train = dtrain)
-nrounds = 2
+nrounds <- 2
 
 # training the model for two rounds
-bst = xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
+bst <- xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
 cat('start testing prediction from first n trees\n')
-labels <- getinfo(dtest,'label')
+labels <- getinfo(dtest, 'label')
 
 ### predict using first 1 tree
-ypred1 = predict(bst, dtest, ntreelimit=1)
+ypred1 <- predict(bst, dtest, ntreelimit = 1)
 # by default, we predict using all the trees
-ypred2 = predict(bst, dtest)
+ypred2 <- predict(bst, dtest)
 
-cat('error of ypred1=', mean(as.numeric(ypred1>0.5)!=labels),'\n')
-cat('error of ypred2=', mean(as.numeric(ypred2>0.5)!=labels),'\n')
+cat('error of ypred1=', mean(as.numeric(ypred1 > 0.5) != labels), '\n')
+cat('error of ypred2=', mean(as.numeric(ypred2 > 0.5) != labels), '\n')
@@ -5,34 +5,34 @@ require(Matrix)
 set.seed(1982)
 
 # load in the agaricus dataset
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
+data(agaricus.train, package = 'xgboost')
+data(agaricus.test, package = 'xgboost')
 dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
 dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)
 
-param <- list(max_depth=2, eta=1, objective='binary:logistic')
-nrounds = 4
+param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
+nrounds <- 4
 
 # training the model for two rounds
-bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
+bst <- xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
 
 # Model accuracy without new features
-accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
+accuracy.before <- (sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label)
+                    / length(agaricus.test$label))
 
 # by default, we predict using all the trees
-pred_with_leaf = predict(bst, dtest, predleaf = TRUE)
+pred_with_leaf <- predict(bst, dtest, predleaf = TRUE)
 head(pred_with_leaf)
 
 create.new.tree.features <- function(model, original.features){
   pred_with_leaf <- predict(model, original.features, predleaf = TRUE)
   cols <- list()
-  for(i in 1:model$niter){
+  for (i in 1:model$niter) {
     # max is not the real max but it s not important for the purpose of adding features
-    leaf.id <- sort(unique(pred_with_leaf[,i]))
-    cols[[i]] <- factor(x = pred_with_leaf[,i], level = leaf.id)
+    leaf.id <- sort(unique(pred_with_leaf[, i]))
+    cols[[i]] <- factor(x = pred_with_leaf[, i], level = leaf.id)
   }
-  cbind(original.features, sparse.model.matrix( ~ . -1, as.data.frame(cols)))
+  cbind(original.features, sparse.model.matrix(~ . - 1, as.data.frame(cols)))
 }
 
 # Convert previous features to one hot encoding
@@ -47,7 +47,9 @@ watchlist <- list(train = new.dtrain)
 bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)
 
 # Model accuracy with new features
-accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
+accuracy.after <- (sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label)
+                   / length(agaricus.test$label))
 
 # Here the accuracy was already good and is now perfect.
-cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now", accuracy.after, "!\n"))
+cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
+          accuracy.after, "!\n"))
@@ -1,14 +1,14 @@
 # running all scripts in demo folder
-demo(basic_walkthrough)
-demo(custom_objective)
-demo(boost_from_prediction)
-demo(predict_first_ntree)
-demo(generalized_linear_model)
-demo(cross_validation)
-demo(create_sparse_matrix)
-demo(predict_leaf_indices)
-demo(early_stopping)
-demo(poisson_regression)
-demo(caret_wrapper)
-demo(tweedie_regression)
-#demo(gpu_accelerated) # can only run when built with GPU support
+demo(basic_walkthrough, package = 'xgboost')
+demo(custom_objective, package = 'xgboost')
+demo(boost_from_prediction, package = 'xgboost')
+demo(predict_first_ntree, package = 'xgboost')
+demo(generalized_linear_model, package = 'xgboost')
+demo(cross_validation, package = 'xgboost')
+demo(create_sparse_matrix, package = 'xgboost')
+demo(predict_leaf_indices, package = 'xgboost')
+demo(early_stopping, package = 'xgboost')
+demo(poisson_regression, package = 'xgboost')
+demo(caret_wrapper, package = 'xgboost')
+demo(tweedie_regression, package = 'xgboost')
+#demo(gpu_accelerated, package = 'xgboost') # can only run when built with GPU support
R-package/demo/tweedie_regression.R (2 lines changed, executable file → normal file)
@@ -46,4 +46,4 @@ var_imp <- xgb.importance(attr(x, 'Dimnames')[[2]], model = bst)
 
 preds <- predict(bst, d_train)
 
-rmse <- sqrt(sum(mean((y - preds)^2)))
+rmse <- sqrt(sum(mean((y - preds) ^ 2)))
R-package/tests/run_lint.R (new file, 71 lines)
@@ -0,0 +1,71 @@
+library(lintr)
+library(crayon)
+
+my_linters <- list(
+  absolute_path_linter = lintr::absolute_path_linter,
+  assignment_linter = lintr::assignment_linter,
+  closed_curly_linter = lintr::closed_curly_linter,
+  commas_linter = lintr::commas_linter,
+  # commented_code_linter = lintr::commented_code_linter,
+  infix_spaces_linter = lintr::infix_spaces_linter,
+  line_length_linter = lintr::line_length_linter,
+  no_tab_linter = lintr::no_tab_linter,
+  object_usage_linter = lintr::object_usage_linter,
+  # snake_case_linter = lintr::snake_case_linter,
+  # multiple_dots_linter = lintr::multiple_dots_linter,
+  object_length_linter = lintr::object_length_linter,
+  open_curly_linter = lintr::open_curly_linter,
+  # single_quotes_linter = lintr::single_quotes_linter,
+  spaces_inside_linter = lintr::spaces_inside_linter,
+  spaces_left_parentheses_linter = lintr::spaces_left_parentheses_linter,
+  trailing_blank_lines_linter = lintr::trailing_blank_lines_linter,
+  trailing_whitespace_linter = lintr::trailing_whitespace_linter,
+  true_false = lintr::T_and_F_symbol_linter
+)
+
+results <- lapply(
+  list.files(path = '.', pattern = '\\.[Rr]$', recursive = TRUE),
+  function (r_file) {
+    cat(sprintf("Processing %s ...\n", r_file))
+    list(r_file = r_file,
+         output = lintr::lint(filename = r_file, linters = my_linters))
+  })
+num_issue <- Reduce(sum, lapply(results, function (e) length(e$output)))
+
+lint2str <- function(lint_entry) {
+  color <- function(type) {
+    switch(type,
+      "warning" = crayon::magenta,
+      "error" = crayon::red,
+      "style" = crayon::blue,
+      crayon::bold
+    )
+  }
+
+  paste0(
+    lapply(lint_entry$output,
+      function (lint_line) {
+        paste0(
+          crayon::bold(lint_entry$r_file, ":",
+                       as.character(lint_line$line_number), ":",
+                       as.character(lint_line$column_number), ": ", sep = ""),
+          color(lint_line$type)(lint_line$type, ": ", sep = ""),
+          crayon::bold(lint_line$message), "\n",
+          lint_line$line, "\n",
+          lintr:::highlight_string(lint_line$message, lint_line$column_number, lint_line$ranges),
+          "\n",
+          collapse = "")
+      }),
+    collapse = "")
+}
+
+if (num_issue > 0) {
+  cat(sprintf('R linters found %d issues:\n', num_issue))
+  for (entry in results) {
+    if (length(entry$output)) {
+      cat(paste0('**** ', crayon::bold(entry$r_file), '\n'))
+      cat(paste0(lint2str(entry), collapse = ''))
+    }
+  }
+  quit(save = 'no', status = 1) # Signal error to parent shell
+}
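To make the parsing logic in run_lint.R easier to follow, here is a small, self-contained sketch (the reduced linter subset and the file path are illustrative, not part of the commit) that applies the same style of linter list to a single file; each returned lint object carries the line_number, column_number, type, message, line and ranges fields that lint2str() formats above:

    library(lintr)

    # Reduced linter set in the same style as my_linters above (illustrative).
    linters <- list(
      assignment_linter = lintr::assignment_linter,
      infix_spaces_linter = lintr::infix_spaces_linter,
      trailing_whitespace_linter = lintr::trailing_whitespace_linter
    )

    # Lint one file and inspect the results.
    lints <- lintr::lint(filename = "demo/basic_walkthrough.R", linters = linters)
    cat(sprintf("found %d issue(s)\n", length(lints)))
    print(lints)  # lintr's built-in console rendering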
@@ -1,26 +0,0 @@
-context("Code is of high quality and lint free")
-test_that("Code Lint", {
-  skip_on_cran()
-  my_linters <- list(
-    absolute_path_linter = lintr::absolute_path_linter,
-    assignment_linter = lintr::assignment_linter,
-    closed_curly_linter = lintr::closed_curly_linter,
-    commas_linter = lintr::commas_linter,
-    # commented_code_linter = lintr::commented_code_linter,
-    infix_spaces_linter = lintr::infix_spaces_linter,
-    line_length_linter = lintr::line_length_linter,
-    no_tab_linter = lintr::no_tab_linter,
-    object_usage_linter = lintr::object_usage_linter,
-    # snake_case_linter = lintr::snake_case_linter,
-    # multiple_dots_linter = lintr::multiple_dots_linter,
-    object_length_linter = lintr::object_length_linter,
-    open_curly_linter = lintr::open_curly_linter,
-    # single_quotes_linter = lintr::single_quotes_linter,
-    spaces_inside_linter = lintr::spaces_inside_linter,
-    spaces_left_parentheses_linter = lintr::spaces_left_parentheses_linter,
-    trailing_blank_lines_linter = lintr::trailing_blank_lines_linter,
-    trailing_whitespace_linter = lintr::trailing_whitespace_linter,
-    true_false = lintr::T_and_F_symbol_linter
-  )
-  lintr::expect_lint_free(linters = my_linters) # uncomment this if you want to check code quality
-})
@@ -7,8 +7,8 @@ context("Models from previous versions of XGBoost can be loaded")
 metadata <- model_generator_metadata()
 
 run_model_param_check <- function (config) {
-  expect_equal(config$learner$learner_model_param$num_feature, '4')
-  expect_equal(config$learner$learner_train_param$booster, 'gbtree')
+  testthat::expect_equal(config$learner$learner_model_param$num_feature, '4')
+  testthat::expect_equal(config$learner$learner_train_param$booster, 'gbtree')
 }
 
 get_num_tree <- function (booster) {
@@ -27,22 +27,24 @@ run_booster_check <- function (booster, name) {
   config <- jsonlite::fromJSON(xgb.config(booster))
   run_model_param_check(config)
   if (name == 'cls') {
-    expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds * metadata$kClasses)
-    expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
-    expect_equal(config$learner$learner_train_param$objective, 'multi:softmax')
-    expect_equal(as.numeric(config$learner$learner_model_param$num_class), metadata$kClasses)
+    testthat::expect_equal(get_num_tree(booster),
+                           metadata$kForests * metadata$kRounds * metadata$kClasses)
+    testthat::expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
+    testthat::expect_equal(config$learner$learner_train_param$objective, 'multi:softmax')
+    testthat::expect_equal(as.numeric(config$learner$learner_model_param$num_class),
+                           metadata$kClasses)
   } else if (name == 'logit') {
-    expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
-    expect_equal(as.numeric(config$learner$learner_model_param$num_class), 0)
-    expect_equal(config$learner$learner_train_param$objective, 'binary:logistic')
+    testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
+    testthat::expect_equal(as.numeric(config$learner$learner_model_param$num_class), 0)
+    testthat::expect_equal(config$learner$learner_train_param$objective, 'binary:logistic')
   } else if (name == 'ltr') {
-    expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
-    expect_equal(config$learner$learner_train_param$objective, 'rank:ndcg')
+    testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
+    testthat::expect_equal(config$learner$learner_train_param$objective, 'rank:ndcg')
   } else {
-    expect_equal(name, 'reg')
-    expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
-    expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
-    expect_equal(config$learner$learner_train_param$objective, 'reg:squarederror')
+    testthat::expect_equal(name, 'reg')
+    testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
+    testthat::expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
+    testthat::expect_equal(config$learner$learner_train_param$objective, 'reg:squarederror')
   }
 }
@@ -73,5 +75,4 @@ test_that("Models from previous versions of XGBoost can be loaded", {
     predict(booster, newdata = pred_data)
     run_booster_check(booster, name)
   })
-  expect_true(TRUE)
 })
@@ -46,6 +46,10 @@ def test_with_autotools(args):
         'R.exe', '-q', '-e',
         "library(testthat); setwd('tests'); source('testthat.R')"
     ])
+    subprocess.check_call([
+        'R.exe', '-q', '-e',
+        "demo(runall, package = 'xgboost')"
+    ])
 
 
 def test_with_cmake(args):
@@ -79,6 +83,10 @@ def test_with_cmake(args):
         'R.exe', '-q', '-e',
         "library(testthat); setwd('tests'); source('testthat.R')"
     ])
+    subprocess.check_call([
+        'R.exe', '-q', '-e',
+        "demo(runall, package = 'xgboost')"
+    ])
 
 
 def main(args):