From afbd016764311041fb5133063e3fc767014c2e96 Mon Sep 17 00:00:00 2001
From: david-cortes
Date: Sun, 8 Dec 2024 17:27:29 +0100
Subject: [PATCH] avoid warnings and prints

---
 R-package/tests/testthat/test_basic.R            |  7 ++++---
 R-package/tests/testthat/test_callbacks.R        |  3 ++-
 R-package/tests/testthat/test_custom_objective.R | 10 ++++++----
 R-package/tests/testthat/test_dmatrix.R          |  7 ++++---
 R-package/tests/testthat/test_glm.R              |  8 +++++---
 R-package/tests/testthat/test_helpers.R          |  1 +
 R-package/tests/testthat/test_ranking.R          |  4 ++--
 7 files changed, 24 insertions(+), 16 deletions(-)

diff --git a/R-package/tests/testthat/test_basic.R b/R-package/tests/testthat/test_basic.R
index f2d6f78d240b..fdec78e88083 100644
--- a/R-package/tests/testthat/test_basic.R
+++ b/R-package/tests/testthat/test_basic.R
@@ -249,7 +249,8 @@ test_that("train and predict RF", {
       objective = "binary:logistic", eval_metric = "error",
       num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1
     ),
-    evals = list(train = xgb.DMatrix(train$data, label = lb))
+    evals = list(train = xgb.DMatrix(train$data, label = lb)),
+    verbose = 0
   )
   expect_equal(xgb.get.num.boosted.rounds(bst), 1)
 
@@ -488,9 +489,9 @@ test_that("max_delta_step works", {
   )
   nrounds <- 5
   # model with no restriction on max_delta_step
-  bst1 <- xgb.train(params, dtrain, nrounds, evals = evals, verbose = 1)
+  bst1 <- xgb.train(params, dtrain, nrounds, evals = evals, verbose = 0)
   # model with restricted max_delta_step
-  bst2 <- xgb.train(c(params, list(max_delta_step = 1)), dtrain, nrounds, evals = evals, verbose = 1)
+  bst2 <- xgb.train(c(params, list(max_delta_step = 1)), dtrain, nrounds, evals = evals, verbose = 0)
   # the no-restriction model is expected to have consistently lower loss during the initial iterations
   expect_true(all(attributes(bst1)$evaluation_log$train_logloss < attributes(bst2)$evaluation_log$train_logloss))
   expect_lt(mean(attributes(bst1)$evaluation_log$train_logloss) / mean(attributes(bst2)$evaluation_log$train_logloss), 0.8)
diff --git a/R-package/tests/testthat/test_callbacks.R b/R-package/tests/testthat/test_callbacks.R
index 5d5bab6428ad..7eef3cb46aa9 100644
--- a/R-package/tests/testthat/test_callbacks.R
+++ b/R-package/tests/testthat/test_callbacks.R
@@ -277,7 +277,7 @@ test_that("early stopping xgb.train works", {
   )
   expect_equal(attributes(bst)$evaluation_log, attributes(bst0)$evaluation_log)
 
-  fname <- file.path(tempdir(), "model.bin")
+  fname <- file.path(tempdir(), "model.ubj")
   xgb.save(bst, fname)
   loaded <- xgb.load(fname)
 
@@ -335,6 +335,7 @@ test_that("early stopping works with titanic", {
     ),
     nrounds = 100,
     early_stopping_rounds = 3,
+    verbose = 0,
     evals = list(train = xgb.DMatrix(dtx, label = dty))
   )
 
diff --git a/R-package/tests/testthat/test_custom_objective.R b/R-package/tests/testthat/test_custom_objective.R
index b37d20392224..1d08b8ebf280 100644
--- a/R-package/tests/testthat/test_custom_objective.R
+++ b/R-package/tests/testthat/test_custom_objective.R
@@ -33,7 +33,7 @@ param <- list(max_depth = 2, eta = 1, nthread = n_threads,
 num_round <- 2
 
 test_that("custom objective works", {
-  bst <- xgb.train(param, dtrain, num_round, evals)
+  bst <- xgb.train(param, dtrain, num_round, evals, verbose = 0)
   expect_equal(class(bst), "xgb.Booster")
   expect_false(is.null(attributes(bst)$evaluation_log))
   expect_false(is.null(attributes(bst)$evaluation_log$eval_error))
@@ -48,7 +48,7 @@ test_that("custom objective in CV works", {
 })
 
 test_that("custom objective with early stop works", {
-  bst <- xgb.train(param, dtrain, 10, evals)
+  bst <- xgb.train(param, dtrain, 10, evals, verbose = 0)
   expect_equal(class(bst), "xgb.Booster")
   train_log <- attributes(bst)$evaluation_log$train_error
   expect_true(all(diff(train_log) <= 0))
@@ -66,7 +66,7 @@ test_that("custom objective using DMatrix attr works", {
     return(list(grad = grad, hess = hess))
   }
   param$objective <- logregobjattr
-  bst <- xgb.train(param, dtrain, num_round, evals)
+  bst <- xgb.train(param, dtrain, num_round, evals, verbose = 0)
   expect_equal(class(bst), "xgb.Booster")
 })
 
@@ -89,7 +89,9 @@ test_that("custom objective with multi-class shape", {
   }
   param$objective <- fake_softprob
   param$eval_metric <- fake_merror
-  bst <- xgb.train(c(param, list(num_class = n_classes)), dtrain, 1)
+  expect_warning({
+    bst <- xgb.train(c(param, list(num_class = n_classes)), dtrain, nrounds = 1)
+  })
 })
 
 softmax <- function(values) {
diff --git a/R-package/tests/testthat/test_dmatrix.R b/R-package/tests/testthat/test_dmatrix.R
index 1b726d817f89..ead67d86a258 100644
--- a/R-package/tests/testthat/test_dmatrix.R
+++ b/R-package/tests/testthat/test_dmatrix.R
@@ -41,13 +41,13 @@ test_that("xgb.DMatrix: basic construction", {
 
   params <- list(tree_method = "hist", nthread = n_threads)
   bst_fd <- xgb.train(
-    params, nrounds = 8, fd, evals = list(train = fd)
+    params, nrounds = 8, fd, evals = list(train = fd), verbose = 0
   )
   bst_dgr <- xgb.train(
-    params, nrounds = 8, fdgr, evals = list(train = fdgr)
+    params, nrounds = 8, fdgr, evals = list(train = fdgr), verbose = 0
   )
   bst_dgc <- xgb.train(
-    params, nrounds = 8, fdgc, evals = list(train = fdgc)
+    params, nrounds = 8, fdgc, evals = list(train = fdgc), verbose = 0
   )
 
   raw_fd <- xgb.save.raw(bst_fd, raw_format = "ubj")
@@ -130,6 +130,7 @@ test_that("xgb.DMatrix: saving, loading", {
   expect_equal(length(cnames), 126)
   tmp_file <- tempfile('xgb.DMatrix_')
   xgb.DMatrix.save(dtrain, tmp_file)
+  xgb.set.config(verbosity = 0)
   dtrain <- xgb.DMatrix(tmp_file)
   expect_equal(colnames(dtrain), cnames)
 
diff --git a/R-package/tests/testthat/test_glm.R b/R-package/tests/testthat/test_glm.R
index b0212cd53f36..226439319dc8 100644
--- a/R-package/tests/testthat/test_glm.R
+++ b/R-package/tests/testthat/test_glm.R
@@ -61,7 +61,7 @@ test_that("gblinear early stopping works", {
     agaricus.test$data, label = agaricus.test$label,
     nthread = n_threads
   )
-  param <- list(
+  param <- xgb.params(
     objective = "binary:logistic", eval_metric = "error", booster = "gblinear",
     nthread = n_threads, eta = 0.8, alpha = 0.0001, lambda = 0.0001,
     updater = "coord_descent"
@@ -70,14 +70,16 @@ test_that("gblinear early stopping works", {
   es_round <- 1
   n <- 10
   booster <- xgb.train(
-    param, dtrain, n, list(eval = dtest, train = dtrain), early_stopping_rounds = es_round
+    param, dtrain, nrounds = n, evals = list(eval = dtest, train = dtrain),
+    early_stopping_rounds = es_round, verbose = 0
   )
   expect_equal(xgb.attr(booster, "best_iteration"), 4)
   predt_es <- predict(booster, dtrain)
 
   n <- xgb.attr(booster, "best_iteration") + es_round + 1
   booster <- xgb.train(
-    param, dtrain, n, list(eval = dtest, train = dtrain), early_stopping_rounds = es_round
+    param, dtrain, nrounds = n, evals = list(eval = dtest, train = dtrain),
+    early_stopping_rounds = es_round, verbose = 0
   )
   predt <- predict(booster, dtrain)
   expect_equal(predt_es, predt)
diff --git a/R-package/tests/testthat/test_helpers.R b/R-package/tests/testthat/test_helpers.R
index aabc778eacd7..74cc16d70579 100644
--- a/R-package/tests/testthat/test_helpers.R
+++ b/R-package/tests/testthat/test_helpers.R
@@ -464,6 +464,7 @@ test_that("xgb.plot.multi.trees works with and without feature names", {
   .skip_if_vcd_not_available()
   xgb.plot.multi.trees(model = bst.Tree.unnamed, features_keep = 3)
   xgb.plot.multi.trees(model = bst.Tree, features_keep = 3)
+  expect_true(TRUE)
 })
 
 test_that("xgb.plot.deepness works", {
diff --git a/R-package/tests/testthat/test_ranking.R b/R-package/tests/testthat/test_ranking.R
index 0e7db42da0b2..3a87bc60944b 100644
--- a/R-package/tests/testthat/test_ranking.R
+++ b/R-package/tests/testthat/test_ranking.R
@@ -15,7 +15,7 @@ test_that('Test ranking with unweighted data', {
   params <- list(eta = 1, tree_method = 'exact', objective = 'rank:pairwise',
                  max_depth = 1, eval_metric = 'auc', eval_metric = 'aucpr',
                  nthread = n_threads)
-  bst <- xgb.train(params, dtrain, nrounds = 10, evals = list(train = dtrain))
+  bst <- xgb.train(params, dtrain, nrounds = 10, evals = list(train = dtrain), verbose = 0)
   # Check if the metric is monotone increasing
   expect_true(all(diff(attributes(bst)$evaluation_log$train_auc) >= 0))
   expect_true(all(diff(attributes(bst)$evaluation_log$train_aucpr) >= 0))
@@ -39,7 +39,7 @@ test_that('Test ranking with weighted data', {
     eta = 1, tree_method = "exact", objective = "rank:pairwise", max_depth = 1,
     eval_metric = "auc", eval_metric = "aucpr", nthread = n_threads
   )
-  bst <- xgb.train(params, dtrain, nrounds = 10, evals = list(train = dtrain))
+  bst <- xgb.train(params, dtrain, nrounds = 10, evals = list(train = dtrain), verbose = 0)
   # Check if the metric is monotone increasing
   expect_true(all(diff(attributes(bst)$evaluation_log$train_auc) >= 0))
   expect_true(all(diff(attributes(bst)$evaluation_log$train_aucpr) >= 0))
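
---

The patch relies on two silencing mechanisms: the per-call verbose argument
of xgb.train(), which suppresses the per-iteration evaluation printout even
when an evals watchlist is supplied, and the process-global
xgb.set.config(verbosity = 0), which silences messages coming from the
native library (e.g. when reading a binary DMatrix back from disk). A
minimal standalone sketch of both, assuming only the xgboost R package and
its bundled agaricus data:

library(xgboost)

data(agaricus.train, package = "xgboost")
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)

# Per-call: verbose = 0 keeps xgb.train() from printing the evaluation
# results each round; the evaluation_log is still recorded on the booster.
bst <- xgb.train(
  params = list(objective = "binary:logistic", nthread = 1),
  data = dtrain,
  nrounds = 2,
  evals = list(train = dtrain),
  verbose = 0
)

# Global: verbosity = 0 silences warnings/messages emitted from the C++
# core for the remainder of the session.
xgb.set.config(verbosity = 0)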