From d7fce99564221f942eafa241ba2b999ba4db0179 Mon Sep 17 00:00:00 2001
From: terrytangyuan
Date: Wed, 28 Oct 2015 22:22:51 -0400
Subject: [PATCH] Lint fix on consistent assignment

---
 R-package/R/predict.xgb.Booster.R       |  2 +-
 R-package/R/utils.R                     |  2 +-
 R-package/R/xgb.cv.R                    | 18 +++++-----
 R-package/R/xgb.importance.R            |  2 +-
 R-package/R/xgb.model.dt.tree.R         |  2 +-
 R-package/R/xgb.train.R                 | 44 ++++++++++++-------------
 R-package/tests/testthat/test_basic.R   | 18 +++++-----
 R-package/tests/testthat/test_helpers.R |  4 +--
 8 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/R-package/R/predict.xgb.Booster.R b/R-package/R/predict.xgb.Booster.R
index 9cc1867da..432581e76 100644
--- a/R-package/R/predict.xgb.Booster.R
+++ b/R-package/R/predict.xgb.Booster.R
@@ -48,7 +48,7 @@ setMethod("predict", signature = "xgb.Booster",
       stop("predict: ntreelimit must be equal to or greater than 1")
     }
   }
-  option = 0
+  option <- 0
   if (outputmargin) {
     option <- option + 1
   }
diff --git a/R-package/R/utils.R b/R-package/R/utils.R
index 459eb068e..2c7c74fc3 100644
--- a/R-package/R/utils.R
+++ b/R-package/R/utils.R
@@ -261,7 +261,7 @@ xgb.cv.mknfold <- function(dall, nfold, param, stratified, folds) {
   ret <- list()
   for (k in 1:nfold) {
     dtest <- slice(dall, folds[[k]])
-    didx = c()
+    didx <- c()
     for (i in 1:nfold) {
       if (i != k) {
         didx <- append(didx, folds[[i]])
diff --git a/R-package/R/xgb.cv.R b/R-package/R/xgb.cv.R
index 3f1be704f..af79bde4e 100644
--- a/R-package/R/xgb.cv.R
+++ b/R-package/R/xgb.cv.R
@@ -124,15 +124,15 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
     stop("xgb.cv: cannot assign two different objectives")
   if (!is.null(params$objective))
     if (class(params$objective) == 'function') {
-      obj = params$objective
-      params[['objective']] = NULL
+      obj <- params$objective
+      params[['objective']] <- NULL
     }
   # if (!is.null(params$eval_metric) && !is.null(feval))
   #   stop("xgb.cv: cannot assign two different evaluation metrics")
   if (!is.null(params$eval_metric))
     if (class(params$eval_metric)=='function') {
-      feval = params$eval_metric
-      params[['eval_metric']] = NULL
+      feval <- params$eval_metric
+      params[['eval_metric']] <- NULL
     }
 
   # Early Stopping
@@ -144,9 +144,9 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
 
   if (is.null(maximize)) {
     if (params$eval_metric %in% c('rmse','logloss','error','merror','mlogloss')) {
-      maximize = FALSE
+      maximize <- FALSE
     } else {
-      maximize = TRUE
+      maximize <- TRUE
     }
   }
 
@@ -167,16 +167,16 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
 
   mat_pred <- FALSE
   if (!is.null(obj_type) && obj_type == 'multi:softprob') {
-    num_class = params[['num_class']]
+    num_class <- params[['num_class']]
     if (is.null(num_class))
       stop('must set num_class to use softmax')
     predictValues <- matrix(0,xgb.numrow(dtrain),num_class)
-    mat_pred = TRUE
+    mat_pred <- TRUE
   } else
     predictValues <- rep(0,xgb.numrow(dtrain))
 
   history <- c()
-  print.every.n = max(as.integer(print.every.n), 1L)
+  print.every.n <- max(as.integer(print.every.n), 1L)
   for (i in 1:nrounds) {
     msg <- list()
     for (k in 1:nfold) {
diff --git a/R-package/R/xgb.importance.R b/R-package/R/xgb.importance.R
index 0b0703587..14c5bbd44 100644
--- a/R-package/R/xgb.importance.R
+++ b/R-package/R/xgb.importance.R
@@ -125,7 +125,7 @@ treeDump <- function(feature_names, text, keepDetail){
 }
 
 linearDump <- function(feature_names, text){
-  which(text == "weight:") %>% {a=.+1;text[a:length(text)]} %>% as.numeric %>% data.table(Feature = feature_names, Weight = .)
+  which(text == "weight:") %>% {a <- .+1; text[a:length(text)]} %>% as.numeric %>% data.table(Feature = feature_names, Weight = .)
 }
 
 # Avoid error messages during CRAN check.
diff --git a/R-package/R/xgb.model.dt.tree.R b/R-package/R/xgb.model.dt.tree.R
index cef988962..b0f5ee279 100644
--- a/R-package/R/xgb.model.dt.tree.R
+++ b/R-package/R/xgb.model.dt.tree.R
@@ -81,7 +81,7 @@ xgb.model.dt.tree <- function(feature_names = NULL, filename_dump = NULL, model
   }
 
   if(!is.null(model)){
-    text = xgb.dump(model = model, with.stats = T)
+    text <- xgb.dump(model = model, with.stats = T)
   } else if(!is.null(filename_dump)){
     text <- readLines(filename_dump) %>% str_trim(side = "both")
   }
diff --git a/R-package/R/xgb.train.R b/R-package/R/xgb.train.R
index b1d79d866..4bf1d36f6 100644
--- a/R-package/R/xgb.train.R
+++ b/R-package/R/xgb.train.R
@@ -140,27 +140,27 @@ xgb.train <- function(params=list(), data, nrounds, watchlist = list(),
     warning('watchlist is provided but verbose=0, no evaluation information will be printed')
   }
 
-  dot.params = list(...)
-  nms.params = names(params)
-  nms.dot.params = names(dot.params)
+  dot.params <- list(...)
+  nms.params <- names(params)
+  nms.dot.params <- names(dot.params)
   if (length(intersect(nms.params,nms.dot.params))>0)
     stop("Duplicated term in parameters. Please check your list of params.")
-  params = append(params, dot.params)
+  params <- append(params, dot.params)
 
   # customized objective and evaluation metric interface
   if (!is.null(params$objective) && !is.null(obj))
     stop("xgb.train: cannot assign two different objectives")
   if (!is.null(params$objective))
     if (class(params$objective)=='function') {
-      obj = params$objective
-      params$objective = NULL
+      obj <- params$objective
+      params$objective <- NULL
     }
   if (!is.null(params$eval_metric) && !is.null(feval))
     stop("xgb.train: cannot assign two different evaluation metrics")
   if (!is.null(params$eval_metric))
     if (class(params$eval_metric)=='function') {
-      feval = params$eval_metric
-      params$eval_metric = NULL
+      feval <- params$eval_metric
+      params$eval_metric <- NULL
     }
 
   # Early stopping
@@ -174,19 +174,19 @@ xgb.train <- function(params=list(), data, nrounds, watchlist = list(),
 
     if (is.null(maximize)) {
       if (params$eval_metric %in% c('rmse','logloss','error','merror','mlogloss')) {
-        maximize = FALSE
+        maximize <- FALSE
       } else {
-        maximize = TRUE
+        maximize <- TRUE
       }
     }
 
     if (maximize) {
-      bestScore = 0
+      bestScore <- 0
     } else {
-      bestScore = Inf
+      bestScore <- Inf
     }
-    bestInd = 0
-    earlyStopflag = FALSE
+    bestInd <- 0
+    earlyStopflag <- FALSE
 
     if (length(watchlist)>1)
       warning('Only the first data set in watchlist is used for early stopping process.')
@@ -195,7 +195,7 @@ xgb.train <- function(params=list(), data, nrounds, watchlist = list(),
   handle <- xgb.Booster(params, append(watchlist, dtrain))
   bst <- xgb.handleToBooster(handle)
 
-  print.every.n=max( as.integer(print.every.n), 1L)
+  print.every.n <- max( as.integer(print.every.n), 1L)
   for (i in 1:nrounds) {
     succ <- xgb.iter.update(bst$handle, dtrain, i - 1, obj)
     if (length(watchlist) != 0) {
@@ -204,14 +204,14 @@ xgb.train <- function(params=list(), data, nrounds, watchlist = list(),
       cat(paste(msg, "\n", sep=""))
 
       if (!is.null(early.stop.round)) {
-        score = strsplit(msg,':|\\s+')[[1]][3]
-        score = as.numeric(score)
+        score <- strsplit(msg,':|\\s+')[[1]][3]
+        score <- as.numeric(score)
         if ((maximize && score>bestScore) || (!maximize && score<bestScore)) {
-          bestScore = score
-          bestInd = i
+          bestScore <- score
+          bestInd <- i
         } else {
           if (i-bestInd>=early.stop.round) {
-            earlyStopflag = TRUE
+            earlyStopflag <- TRUE
             cat('Stopping. Best iteration:',bestInd)
             break
           }
@@ -226,8 +226,8 @@ xgb.train <- function(params=list(), data, nrounds, watchlist = list(),
   }
   bst <- xgb.Booster.check(bst)
   if (!is.null(early.stop.round)) {
-    bst$bestScore = bestScore
-    bst$bestInd = bestInd
+    bst$bestScore <- bestScore
+    bst$bestInd <- bestInd
   }
   return(bst)
 }
diff --git a/R-package/tests/testthat/test_basic.R b/R-package/tests/testthat/test_basic.R
index 791f1246c..88bd905ca 100644
--- a/R-package/tests/testthat/test_basic.R
+++ b/R-package/tests/testthat/test_basic.R
@@ -4,30 +4,30 @@
 context("basic functions")
 
 data(agaricus.train, package='xgboost')
 data(agaricus.test, package='xgboost')
-train = agaricus.train
-test = agaricus.test
+train <- agaricus.train
+test <- agaricus.test
 
 test_that("train and predict", {
-  bst = xgboost(data = train$data, label = train$label, max.depth = 2,
+  bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
                 eta = 1, nthread = 2, nround = 2, objective = "binary:logistic")
-  pred = predict(bst, test$data)
+  pred <- predict(bst, test$data)
 })
 
 test_that("early stopping", {
-  res = xgb.cv(data = train$data, label = train$label, max.depth = 2, nfold = 5,
+  res <- xgb.cv(data = train$data, label = train$label, max.depth = 2, nfold = 5,
               eta = 0.3, nthread = 2, nround = 20, objective = "binary:logistic",
               early.stop.round = 3, maximize = FALSE)
   expect_true(nrow(res)<20)
-  bst = xgboost(data = train$data, label = train$label, max.depth = 2,
+  bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
                 eta = 0.3, nthread = 2, nround = 20, objective = "binary:logistic",
                 early.stop.round = 3, maximize = FALSE)
-  pred = predict(bst, test$data)
+  pred <- predict(bst, test$data)
 })
 
 test_that("save_period", {
-  bst = xgboost(data = train$data, label = train$label, max.depth = 2,
+  bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
                 eta = 0.3, nthread = 2, nround = 20, objective = "binary:logistic",
                 save_period = 10, save_name = "xgb.model")
-  pred = predict(bst, test$data)
+  pred <- predict(bst, test$data)
 })
diff --git a/R-package/tests/testthat/test_helpers.R b/R-package/tests/testthat/test_helpers.R
index 4d80146e3..9cef61c49 100644
--- a/R-package/tests/testthat/test_helpers.R
+++ b/R-package/tests/testthat/test_helpers.R
@@ -11,8 +11,8 @@
 df <- data.table(Arthritis, keep.rownames = F)
 df[,AgeDiscret:= as.factor(round(Age/10,0))]
 df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))]
 df[,ID:=NULL]
-sparse_matrix = sparse.model.matrix(Improved~.-1, data = df)
-output_vector = df[,Y:=0][Improved == "Marked",Y:=1][,Y]
+sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df)
+output_vector <- df[,Y:=0][Improved == "Marked",Y:=1][,Y]
 bst <- xgboost(data = sparse_matrix, label = output_vector, max.depth = 9,
                eta = 1, nthread = 2, nround = 10,objective = "binary:logistic")
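
Background on the rule this patch enforces (a brief note appended below the patch, not part of the commit): R accepts both '=' and '<-' for assignment, but the two are not interchangeable in every context, which is why the lint check standardizes on '<-'. A minimal base-R sketch, using throwaway names x and y in a fresh session:

    ## '=' assigns only at statement level; inside a function call it is
    ## argument matching, not assignment:
    mean(x = 1:10)    # returns 5.5 by matching mean()'s `x` argument
    exists("x")       # FALSE -- no variable x was created

    ## '<-' is assignment in any context:
    mean(y <- 1:10)   # returns 5.5 ...
    exists("y")       # TRUE  -- ... and y now exists in the workspace

Because '<-' is unambiguous everywhere, rewriting each 'name = value' statement as 'name <- value', as this patch does mechanically, changes style only, not behavior. If the project's lint step uses the lintr package, this corresponds to the check implemented by its assignment_linter (an assumption; the lint tooling itself is not shown in the patch).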