[CI] Improve R linter script (#5944)

* [CI] Move lint to a separate script

* [CI] Improved lintr launcher

* Add lintr as a separate action

* Add custom parsing logic to print out logs

* Fix lintr issues in demos

* Run R demos

* Fix CRAN checks

* Install XGBoost into R env before running lintr

* Install devtools (needed to run demos)
Philip Hyunsu Cho, 2020-07-27 00:55:35 -07:00 (committed by GitHub)
parent 8943eb4314
commit 5879acde9a
22 changed files with 317 additions and 207 deletions


@@ -6,6 +6,9 @@ name: XGBoost-CI
 # events but only for the master branch
 on: [push, pull_request]
 
+env:
+  R_PACKAGES: c('XML', 'igraph', 'data.table', 'magrittr', 'stringi', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools')
+
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
   test-with-jvm:
@@ -38,6 +41,49 @@ jobs:
       mvn test -pl :xgboost4j_2.12
 
+  lintr:
+    runs-on: ${{ matrix.config.os }}
+    name: Run R linters on OS ${{ matrix.config.os }}, R ${{ matrix.config.r }}, Compiler ${{ matrix.config.compiler }}, Build ${{ matrix.config.build }}
+    strategy:
+      matrix:
+        config:
+          - {os: windows-latest, r: 'release', compiler: 'mingw', build: 'autotools'}
+    env:
+      R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
+      RSPM: ${{ matrix.config.rspm }}
+    steps:
+    - uses: actions/checkout@v2
+      with:
+        submodules: 'true'
+    - uses: r-lib/actions/setup-r@master
+      with:
+        r-version: ${{ matrix.config.r }}
+    - name: Cache R packages
+      uses: actions/cache@v2
+      with:
+        path: ${{ env.R_LIBS_USER }}
+        key: ${{ runner.os }}-r-${{ matrix.config.r }}-1-${{ hashFiles('R-package/DESCRIPTION') }}
+        restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-2-
+    - name: Install dependencies
+      shell: Rscript {0}
+      run: |
+        install.packages(${{ env.R_PACKAGES }},
+                         repos = 'http://cloud.r-project.org',
+                         dependencies = c('Depends', 'Imports', 'LinkingTo'))
+    - name: Run lintr
+      run: |
+        cd R-package
+        R.exe CMD INSTALL .
+        Rscript.exe tests/run_lint.R
+
   test-with-R:
     runs-on: ${{ matrix.config.os }}
@@ -78,8 +124,9 @@
     - name: Install dependencies
       shell: Rscript {0}
       run: |
-        install.packages(c('XML','igraph'))
-        install.packages(c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','knitr','rmarkdown'))
+        install.packages(${{ env.R_PACKAGES }},
+                         repos = 'http://cloud.r-project.org',
+                         dependencies = c('Depends', 'Imports', 'LinkingTo'))
     - uses: actions/setup-python@v2
       with:

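Note that `R_PACKAGES` holds a literal R expression: GitHub Actions substitutes `${{ env.R_PACKAGES }}` textually, so the `Rscript {0}` step ends up evaluating an ordinary `install.packages()` call. A minimal sketch of the expanded step, with the package list abbreviated here for illustration:

    # Expanded form of the templated install step (abbreviated package list)
    install.packages(c('XML', 'igraph', 'data.table', 'lintr', 'devtools'),
                     repos = 'http://cloud.r-project.org',
                     dependencies = c('Depends', 'Imports', 'LinkingTo'))

Limiting `dependencies` to 'Depends', 'Imports', and 'LinkingTo' avoids pulling in each package's 'Suggests' tree, which keeps the install step fast.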

@@ -54,7 +54,8 @@ Suggests:
     lintr,
     igraph (>= 1.0.1),
     jsonlite,
-    float
+    float,
+    crayon
 Depends:
     R (>= 3.3.0)
 Imports:


@@ -61,7 +61,7 @@ pred2 <- predict(bst2, test$data)
 print(paste("sum(abs(pred2-pred))=", sum(abs(pred2 - pred))))
 # save model to R's raw vector
-raw = xgb.save.raw(bst)
+raw <- xgb.save.raw(bst)
 # load binary model to R
 bst3 <- xgb.load(raw)
 pred3 <- predict(bst3, test$data)
@@ -93,13 +93,13 @@ dtrain2 <- xgb.DMatrix("dtrain.buffer")
 bst <- xgb.train(data = dtrain2, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
                  nthread = 2, objective = "binary:logistic")
 # information can be extracted from xgb.DMatrix using getinfo
-label = getinfo(dtest, "label")
+label <- getinfo(dtest, "label")
 pred <- predict(bst, dtest)
 err <- as.numeric(sum(as.integer(pred > 0.5) != label)) / length(label)
 print(paste("test-error=", err))
 # You can dump the tree you learned using xgb.dump into a text file
-dump_path = file.path(tempdir(), 'dump.raw.txt')
+dump_path <- file.path(tempdir(), 'dump.raw.txt')
 xgb.dump(bst, dump_path, with_stats = TRUE)
 # Finally, you can check which features are the most important.


@@ -52,7 +52,7 @@ print(levels(df[,Treatment]))
 #
 # Formulae Improved~.-1 used below means transform all categorical features but column Improved to binary values.
 # Column Improved is excluded because it will be our output column, the one we want to predict.
-sparse_matrix = sparse.model.matrix(Improved~.-1, data = df)
+sparse_matrix <- sparse.model.matrix(Improved ~ . - 1, data = df)
 
 cat("Encoding of the sparse Matrix\n")
 print(sparse_matrix)
@@ -61,7 +61,7 @@ print(sparse_matrix)
 # 1. Set, for all rows, field in Y column to 0;
 # 2. set Y to 1 when Improved == Marked;
 # 3. Return Y column
-output_vector = df[,Y:=0][Improved == "Marked",Y:=1][,Y]
+output_vector <- df[, Y := 0][Improved == "Marked", Y := 1][, Y]
 
 # Following is the same process as other demo
 cat("Learning...\n")


@@ -31,4 +31,3 @@ bst <- xgb.train(param, dtrain, num_round, watchlist)
 ypred <- predict(bst, dtest)
 labels <- getinfo(dtest, 'label')
 cat('error of preds=', mean(as.numeric(ypred > 0.5) != labels), '\n')
-


@@ -5,7 +5,9 @@ set.seed(1024)
 # Function to obtain a list of interactions fitted in trees, requires input of maximum depth
 treeInteractions <- function(input_tree, input_max_depth) {
-  trees <- copy(input_tree)  # copy tree input to prevent overwriting
+  ID_merge <- i.id <- i.feature <- NULL  # Suppress warning "no visible binding for global variable"
+  trees <- data.table::copy(input_tree)  # copy tree input to prevent overwriting
 
   if (input_max_depth < 2) return(list())  # no interactions if max depth < 2
   if (nrow(input_tree) == 1) return(list())
@@ -15,22 +17,25 @@ treeInteractions <- function(input_tree, input_max_depth){
     parents_left <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = Yes)]
     parents_right <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = No)]
-    setorderv(trees, 'ID_merge')
-    setorderv(parents_left, 'ID_merge')
-    setorderv(parents_right, 'ID_merge')
+    data.table::setorderv(trees, 'ID_merge')
+    data.table::setorderv(parents_left, 'ID_merge')
+    data.table::setorderv(parents_right, 'ID_merge')
 
     trees <- merge(trees, parents_left, by = 'ID_merge', all.x = TRUE)
-    trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
+    trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
+          := list(i.id, i.feature)]
     trees[, c('i.id', 'i.feature') := NULL]
 
     trees <- merge(trees, parents_right, by = 'ID_merge', all.x = TRUE)
-    trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
+    trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
+          := list(i.id, i.feature)]
     trees[, c('i.id', 'i.feature') := NULL]
   }
 
   # Extract nodes with interactions
   interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
-                             c('Feature',paste0('parent_feat_',1:(input_max_depth-1))), with=FALSE]
+                             c('Feature', paste0('parent_feat_', 1:(input_max_depth - 1))),
+                             with = FALSE]
   interaction_trees_split <- split(interaction_trees, 1:nrow(interaction_trees))
   interaction_list <- lapply(interaction_trees_split, as.character)
@@ -48,13 +53,14 @@ treeInteractions <- function(input_tree, input_max_depth){
 # Generate sample data
 x <- list()
 for (i in 1:10) {
-  x[[i]] = i*rnorm(1000, 10)
+  x[[i]] <- i * rnorm(1000, 10)
 }
 x <- as.data.table(x)
 
-y = -1*x[, rowSums(.SD)] + x[['V1']]*x[['V2']] + x[['V3']]*x[['V4']]*x[['V5']] + rnorm(1000, 0.001) + 3*sin(x[['V7']])
+y <- -1 * x[, rowSums(.SD)] + x[['V1']] * x[['V2']] + x[['V3']] * x[['V4']] * x[['V5']]
+  + rnorm(1000, 0.001) + 3 * sin(x[['V7']])
 
-train = as.matrix(x)
+train <- as.matrix(x)
 
 # Interaction constraint list (column names form)
 interaction_list <- list(c('V1', 'V2'), c('V3', 'V4', 'V5'))
@@ -65,31 +71,33 @@ cols2ids <- function(object, col_names) {
   names(LUT) <- col_names
   rapply(object, function(x) LUT[x], classes = "character", how = "replace")
 }
-interaction_list_fid = cols2ids(interaction_list, colnames(train))
+interaction_list_fid <- cols2ids(interaction_list, colnames(train))
 
 # Fit model with interaction constraints
-bst = xgboost(data = train, label = y, max_depth = 4,
+bst <- xgboost(data = train, label = y, max_depth = 4,
               eta = 0.1, nthread = 2, nrounds = 1000,
              interaction_constraints = interaction_list_fid)
 bst_tree <- xgb.model.dt.tree(colnames(train), bst)
-bst_interactions <- treeInteractions(bst_tree, 4)  # interactions constrained to combinations of V1*V2 and V3*V4*V5
+bst_interactions <- treeInteractions(bst_tree, 4)
+# interactions constrained to combinations of V1*V2 and V3*V4*V5
 
 # Fit model without interaction constraints
-bst2 = xgboost(data = train, label = y, max_depth = 4,
+bst2 <- xgboost(data = train, label = y, max_depth = 4,
                eta = 0.1, nthread = 2, nrounds = 1000)
 bst2_tree <- xgb.model.dt.tree(colnames(train), bst2)
 bst2_interactions <- treeInteractions(bst2_tree, 4)  # much more interactions
 
 # Fit model with both interaction and monotonicity constraints
-bst3 = xgboost(data = train, label = y, max_depth = 4,
+bst3 <- xgboost(data = train, label = y, max_depth = 4,
                eta = 0.1, nthread = 2, nrounds = 1000,
                interaction_constraints = interaction_list_fid,
                monotone_constraints = c(-1, 0, 0, 0, 0, 0, 0, 0, 0, 0))
 bst3_tree <- xgb.model.dt.tree(colnames(train), bst3)
-bst3_interactions <- treeInteractions(bst3_tree, 4)  # interactions still constrained to combinations of V1*V2 and V3*V4*V5
+bst3_interactions <- treeInteractions(bst3_tree, 4)
+# interactions still constrained to combinations of V1*V2 and V3*V4*V5
 
 # Show monotonic constraints still apply by checking scores after incrementing V1
 x1 <- sort(unique(x[['V1']]))


@@ -1,7 +1,6 @@
 data(mtcars)
 head(mtcars)
-bst = xgboost(data=as.matrix(mtcars[,-11]),label=mtcars[,11],
+bst <- xgboost(data = as.matrix(mtcars[, -11]), label = mtcars[, 11],
               objective = 'count:poisson', nrounds = 5)
-pred = predict(bst,as.matrix(mtcars[,-11]))
+pred <- predict(bst, as.matrix(mtcars[, -11]))
 sqrt(mean((pred - mtcars[, 11]) ^ 2))


@@ -7,17 +7,17 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
 param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
 watchlist <- list(eval = dtest, train = dtrain)
-nrounds = 2
+nrounds <- 2
 
 # training the model for two rounds
-bst = xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
+bst <- xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
 
 cat('start testing prediction from first n trees\n')
 labels <- getinfo(dtest, 'label')
 
 ### predict using first 1 tree
-ypred1 = predict(bst, dtest, ntreelimit=1)
+ypred1 <- predict(bst, dtest, ntreelimit = 1)
 # by default, we predict using all the trees
-ypred2 = predict(bst, dtest)
+ypred2 <- predict(bst, dtest)
 
 cat('error of ypred1=', mean(as.numeric(ypred1 > 0.5) != labels), '\n')
 cat('error of ypred2=', mean(as.numeric(ypred2 > 0.5) != labels), '\n')


@@ -11,17 +11,17 @@ dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
 dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)
 param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
-nrounds = 4
+nrounds <- 4
 
 # training the model for two rounds
-bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
+bst <- xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
 
 # Model accuracy without new features
-accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
+accuracy.before <- (sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label)
+                    / length(agaricus.test$label))
 
 # by default, we predict using all the trees
-pred_with_leaf = predict(bst, dtest, predleaf = TRUE)
+pred_with_leaf <- predict(bst, dtest, predleaf = TRUE)
 head(pred_with_leaf)
 
 create.new.tree.features <- function(model, original.features){
@@ -47,7 +47,9 @@ watchlist <- list(train = new.dtrain)
 bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)
 
 # Model accuracy with new features
-accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
+accuracy.after <- (sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label)
+                   / length(agaricus.test$label))
 
 # Here the accuracy was already good and is now perfect.
-cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now", accuracy.after, "!\n"))
+cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
+          accuracy.after, "!\n"))


@@ -1,14 +1,14 @@
 # running all scripts in demo folder
-demo(basic_walkthrough)
-demo(custom_objective)
-demo(boost_from_prediction)
-demo(predict_first_ntree)
-demo(generalized_linear_model)
-demo(cross_validation)
-demo(create_sparse_matrix)
-demo(predict_leaf_indices)
-demo(early_stopping)
-demo(poisson_regression)
-demo(caret_wrapper)
-demo(tweedie_regression)
-#demo(gpu_accelerated) # can only run when built with GPU support
+demo(basic_walkthrough, package = 'xgboost')
+demo(custom_objective, package = 'xgboost')
+demo(boost_from_prediction, package = 'xgboost')
+demo(predict_first_ntree, package = 'xgboost')
+demo(generalized_linear_model, package = 'xgboost')
+demo(cross_validation, package = 'xgboost')
+demo(create_sparse_matrix, package = 'xgboost')
+demo(predict_leaf_indices, package = 'xgboost')
+demo(early_stopping, package = 'xgboost')
+demo(poisson_regression, package = 'xgboost')
+demo(caret_wrapper, package = 'xgboost')
+demo(tweedie_regression, package = 'xgboost')
+#demo(gpu_accelerated, package = 'xgboost') # can only run when built with GPU support
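Passing `package = 'xgboost'` makes `demo()` resolve each script from the installed package's demo index rather than the current working directory, which is what lets CI invoke the suite from anywhere once `R CMD INSTALL` has run. A small sketch (assumes the package is installed):

    # List the demos registered by the installed package
    demo(package = 'xgboost')
    # Run the whole suite non-interactively, as the CI driver does
    demo(runall, package = 'xgboost', ask = FALSE)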

R-package/demo/tweedie_regression.R: Executable file → Normal file (mode change only; 0 line changes)


@@ -0,0 +1,71 @@
+library(lintr)
+library(crayon)
+
+my_linters <- list(
+  absolute_path_linter = lintr::absolute_path_linter,
+  assignment_linter = lintr::assignment_linter,
+  closed_curly_linter = lintr::closed_curly_linter,
+  commas_linter = lintr::commas_linter,
+  # commented_code_linter = lintr::commented_code_linter,
+  infix_spaces_linter = lintr::infix_spaces_linter,
+  line_length_linter = lintr::line_length_linter,
+  no_tab_linter = lintr::no_tab_linter,
+  object_usage_linter = lintr::object_usage_linter,
+  # snake_case_linter = lintr::snake_case_linter,
+  # multiple_dots_linter = lintr::multiple_dots_linter,
+  object_length_linter = lintr::object_length_linter,
+  open_curly_linter = lintr::open_curly_linter,
+  # single_quotes_linter = lintr::single_quotes_linter,
+  spaces_inside_linter = lintr::spaces_inside_linter,
+  spaces_left_parentheses_linter = lintr::spaces_left_parentheses_linter,
+  trailing_blank_lines_linter = lintr::trailing_blank_lines_linter,
+  trailing_whitespace_linter = lintr::trailing_whitespace_linter,
+  true_false = lintr::T_and_F_symbol_linter
+)
+
+results <- lapply(
+  list.files(path = '.', pattern = '\\.[Rr]$', recursive = TRUE),
+  function (r_file) {
+    cat(sprintf("Processing %s ...\n", r_file))
+    list(r_file = r_file,
+         output = lintr::lint(filename = r_file, linters = my_linters))
+  })
+
+num_issue <- Reduce(sum, lapply(results, function (e) length(e$output)))
+
+lint2str <- function(lint_entry) {
+  color <- function(type) {
+    switch(type,
+      "warning" = crayon::magenta,
+      "error" = crayon::red,
+      "style" = crayon::blue,
+      crayon::bold
+    )
+  }
+  paste0(
+    lapply(lint_entry$output,
+           function (lint_line) {
+             paste0(
+               crayon::bold(lint_entry$r_file, ":",
+                            as.character(lint_line$line_number), ":",
+                            as.character(lint_line$column_number), ": ", sep = ""),
+               color(lint_line$type)(lint_line$type, ": ", sep = ""),
+               crayon::bold(lint_line$message), "\n",
+               lint_line$line, "\n",
+               lintr:::highlight_string(lint_line$message, lint_line$column_number, lint_line$ranges),
+               "\n",
+               collapse = "")
+           }),
+    collapse = "")
+}
+
+if (num_issue > 0) {
+  cat(sprintf('R linters found %d issues:\n', num_issue))
+  for (entry in results) {
+    if (length(entry$output)) {
+      cat(paste0('**** ', crayon::bold(entry$r_file), '\n'))
+      cat(paste0(lint2str(entry), collapse = ''))
+    }
+  }
+  quit(save = 'no', status = 1)  # Signal error to parent shell
+}
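Each element returned by `lintr::lint()` is a lint record exposing the fields the formatter above reads (`line_number`, `column_number`, `type`, `message`, `line`, `ranges`). A small sketch of inspecting one record; the file path here is only a placeholder:

    # Peek at the first lint record of one file (placeholder path)
    lints <- lintr::lint(filename = 'R/utils.R', linters = my_linters)
    if (length(lints) > 0) {
      l <- lints[[1]]
      cat(l$line_number, l$column_number, l$type, l$message, sep = ' | ')
    }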


@@ -1,26 +0,0 @@
-context("Code is of high quality and lint free")
-test_that("Code Lint", {
-  skip_on_cran()
-  my_linters <- list(
-    absolute_path_linter = lintr::absolute_path_linter,
-    assignment_linter = lintr::assignment_linter,
-    closed_curly_linter = lintr::closed_curly_linter,
-    commas_linter = lintr::commas_linter,
-    # commented_code_linter = lintr::commented_code_linter,
-    infix_spaces_linter = lintr::infix_spaces_linter,
-    line_length_linter = lintr::line_length_linter,
-    no_tab_linter = lintr::no_tab_linter,
-    object_usage_linter = lintr::object_usage_linter,
-    # snake_case_linter = lintr::snake_case_linter,
-    # multiple_dots_linter = lintr::multiple_dots_linter,
-    object_length_linter = lintr::object_length_linter,
-    open_curly_linter = lintr::open_curly_linter,
-    # single_quotes_linter = lintr::single_quotes_linter,
-    spaces_inside_linter = lintr::spaces_inside_linter,
-    spaces_left_parentheses_linter = lintr::spaces_left_parentheses_linter,
-    trailing_blank_lines_linter = lintr::trailing_blank_lines_linter,
-    trailing_whitespace_linter = lintr::trailing_whitespace_linter,
-    true_false = lintr::T_and_F_symbol_linter
-  )
-  lintr::expect_lint_free(linters = my_linters)  # uncomment this if you want to check code quality
-})


@@ -7,8 +7,8 @@ context("Models from previous versions of XGBoost can be loaded")
 metadata <- model_generator_metadata()
 
 run_model_param_check <- function (config) {
-  expect_equal(config$learner$learner_model_param$num_feature, '4')
-  expect_equal(config$learner$learner_train_param$booster, 'gbtree')
+  testthat::expect_equal(config$learner$learner_model_param$num_feature, '4')
+  testthat::expect_equal(config$learner$learner_train_param$booster, 'gbtree')
 }
 
 get_num_tree <- function (booster) {
@@ -27,22 +27,24 @@ run_booster_check <- function (booster, name) {
   config <- jsonlite::fromJSON(xgb.config(booster))
   run_model_param_check(config)
   if (name == 'cls') {
-    expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds * metadata$kClasses)
-    expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
-    expect_equal(config$learner$learner_train_param$objective, 'multi:softmax')
-    expect_equal(as.numeric(config$learner$learner_model_param$num_class), metadata$kClasses)
+    testthat::expect_equal(get_num_tree(booster),
+                           metadata$kForests * metadata$kRounds * metadata$kClasses)
+    testthat::expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
+    testthat::expect_equal(config$learner$learner_train_param$objective, 'multi:softmax')
+    testthat::expect_equal(as.numeric(config$learner$learner_model_param$num_class),
+                           metadata$kClasses)
   } else if (name == 'logit') {
-    expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
-    expect_equal(as.numeric(config$learner$learner_model_param$num_class), 0)
-    expect_equal(config$learner$learner_train_param$objective, 'binary:logistic')
+    testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
+    testthat::expect_equal(as.numeric(config$learner$learner_model_param$num_class), 0)
+    testthat::expect_equal(config$learner$learner_train_param$objective, 'binary:logistic')
   } else if (name == 'ltr') {
-    expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
-    expect_equal(config$learner$learner_train_param$objective, 'rank:ndcg')
+    testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
+    testthat::expect_equal(config$learner$learner_train_param$objective, 'rank:ndcg')
   } else {
-    expect_equal(name, 'reg')
-    expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
-    expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
-    expect_equal(config$learner$learner_train_param$objective, 'reg:squarederror')
+    testthat::expect_equal(name, 'reg')
+    testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
+    testthat::expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
+    testthat::expect_equal(config$learner$learner_train_param$objective, 'reg:squarederror')
   }
 }
@@ -73,5 +75,4 @@ test_that("Models from previous versions of XGBoost can be loaded", {
     predict(booster, newdata = pred_data)
     run_booster_check(booster, name)
   })
-  expect_true(TRUE)
 })
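Namespace-qualifying the assertions with `testthat::` removes the file's reliance on `library(testthat)` being attached, which also helps it pass lintr's `object_usage_linter`. A trivial sketch of the pattern:

    # Qualified assertion works without attaching testthat first
    testthat::expect_equal(as.numeric('0.5'), 0.5)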


@@ -46,6 +46,10 @@ def test_with_autotools(args):
         'R.exe', '-q', '-e',
         "library(testthat); setwd('tests'); source('testthat.R')"
     ])
+    subprocess.check_call([
+        'R.exe', '-q', '-e',
+        "demo(runall, package = 'xgboost')"
+    ])
 
 
 def test_with_cmake(args):
@@ -79,6 +83,10 @@ def test_with_cmake(args):
         'R.exe', '-q', '-e',
         "library(testthat); setwd('tests'); source('testthat.R')"
     ])
+    subprocess.check_call([
+        'R.exe', '-q', '-e',
+        "demo(runall, package = 'xgboost')"
+    ])
 
 
 def main(args):