From 743c372368e7fa7fd30782b6a23c796deb734c85 Mon Sep 17 00:00:00 2001
From: James Lamb <jaylamb20@gmail.com>
Date: Mon, 6 Nov 2023 23:25:08 -0600
Subject: [PATCH 1/3] [R-package] standardize naming of internal functions

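Rename the R package's internal (non-exported) helper functions to a
leading-dot, snake_case convention, so they are easy to distinguish from
exported "lgb.*" functions. A few representative renames, taken directly
from this diff (the remaining helpers follow the same pattern):

    lgb.is.Booster()          -> .is_Booster()
    lgb.is.Dataset()          -> .is_Dataset()
    lgb.is.null.handle()      -> .is_null_handle()
    lgb.params2str()          -> .lgb_params2str()
    lgb.check.eval()          -> .check_eval()
    lgb.check.wrapper_param() -> .check_wrapper_param()
    add.cb()                  -> .add_cb()
    categorize.callbacks()    -> .categorize_callbacks()

Call sites in R/ and tests/testthat/ are updated accordingly; exported
functions are not renamed.
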
---
 R-package/R/callback.R                        |  8 +--
 R-package/R/lgb.Booster.R                     | 34 +++++-----
 R-package/R/lgb.DataProcessor.R               |  2 +-
 R-package/R/lgb.Dataset.R                     | 54 +++++++--------
 R-package/R/lgb.Predictor.R                   | 10 +--
 R-package/R/lgb.cv.R                          | 38 +++++------
 R-package/R/lgb.drop_serialized.R             |  2 +-
 R-package/R/lgb.importance.R                  |  2 +-
 R-package/R/lgb.interprete.R                  |  8 +--
 R-package/R/lgb.make_serializable.R           |  2 +-
 R-package/R/lgb.model.dt.tree.R               |  7 +-
 R-package/R/lgb.plot.interpretation.R         |  6 +-
 R-package/R/lgb.restore_handle.R              |  2 +-
 R-package/R/lgb.train.R                       | 34 ++++++----
 R-package/R/lightgbm.R                        | 10 +--
 R-package/R/saveRDS.lgb.Booster.R             |  2 +-
 R-package/R/utils.R                           | 20 +++---
 R-package/tests/testthat/test_Predictor.R     | 10 +--
 R-package/tests/testthat/test_basic.R         | 22 +++----
 R-package/tests/testthat/test_dataset.R       | 16 ++---
 .../tests/testthat/test_learning_to_rank.R    |  2 +-
 R-package/tests/testthat/test_lgb.Booster.R   | 40 +++++------
 R-package/tests/testthat/test_utils.R         | 66 +++++++++----------
 23 files changed, 203 insertions(+), 194 deletions(-)

diff --git a/R-package/R/callback.R b/R-package/R/callback.R
index 3569b47f5b14..c436409ddafb 100644
--- a/R-package/R/callback.R
+++ b/R-package/R/callback.R
@@ -323,17 +323,17 @@ cb_early_stop <- function(stopping_rounds, first_metric_only, verbose) {
 }
 
 # Extract callback names from the list of callbacks
-callback.names <- function(cb_list) {
+.callback_names <- function(cb_list) {
   return(unlist(lapply(cb_list, attr, "name")))
 }
 
-add.cb <- function(cb_list, cb) {
+.add_cb <- function(cb_list, cb) {
 
   # Combine two elements
   cb_list <- c(cb_list, cb)
 
   # Set names of elements
-  names(cb_list) <- callback.names(cb_list = cb_list)
+  names(cb_list) <- .callback_names(cb_list = cb_list)
 
   if ("cb_early_stop" %in% names(cb_list)) {
 
@@ -349,7 +349,7 @@ add.cb <- function(cb_list, cb) {
 
 }
 
-categorize.callbacks <- function(cb_list) {
+.categorize_callbacks <- function(cb_list) {
 
   # Check for pre-iteration or post-iteration
   return(
diff --git a/R-package/R/lgb.Booster.R b/R-package/R/lgb.Booster.R
index 755b171724f9..e5b4c8001d40 100644
--- a/R-package/R/lgb.Booster.R
+++ b/R-package/R/lgb.Booster.R
@@ -31,12 +31,12 @@ Booster <- R6::R6Class(
 
       if (!is.null(train_set)) {
 
-        if (!lgb.is.Dataset(train_set)) {
+        if (!.is_Dataset(train_set)) {
           stop("lgb.Booster: Can only use lgb.Dataset as training data")
         }
         train_set_handle <- train_set$.__enclos_env__$private$get_handle()
         params <- utils::modifyList(params, train_set$get_params())
-        params_str <- lgb.params2str(params = params)
+        params_str <- .lgb_params2str(params = params)
         # Store booster handle
         handle <- .Call(
           LGBM_BoosterCreate_R
@@ -130,7 +130,7 @@ Booster <- R6::R6Class(
     # Add validation data
     add_valid = function(data, name) {
 
-      if (!lgb.is.Dataset(data)) {
+      if (!.is_Dataset(data)) {
         stop("lgb.Booster.add_valid: Can only use lgb.Dataset as validation data")
       }
 
@@ -167,7 +167,7 @@ Booster <- R6::R6Class(
         params <- utils::modifyList(self$params, params)
       }
 
-      params_str <- lgb.params2str(params = params)
+      params_str <- .lgb_params2str(params = params)
 
       self$restore_handle()
 
@@ -193,7 +193,7 @@ Booster <- R6::R6Class(
 
       if (!is.null(train_set)) {
 
-        if (!lgb.is.Dataset(train_set)) {
+        if (!.is_Dataset(train_set)) {
           stop("lgb.Booster.update: Only can use lgb.Dataset as training data")
         }
 
@@ -340,7 +340,7 @@ Booster <- R6::R6Class(
     # Evaluate data on metrics
     eval = function(data, name, feval = NULL) {
 
-      if (!lgb.is.Dataset(data)) {
+      if (!.is_Dataset(data)) {
         stop("lgb.Booster.eval: Can only use lgb.Dataset to eval")
       }
 
@@ -508,17 +508,17 @@ Booster <- R6::R6Class(
       # NOTE: doing this here instead of in Predictor$predict() to keep
       #       Predictor$predict() as fast as possible
       if (length(params) > 0L) {
-        params <- lgb.check.wrapper_param(
+        params <- .check_wrapper_param(
           main_param_name = "predict_raw_score"
           , params = params
           , alternative_kwarg_value = rawscore
         )
-        params <- lgb.check.wrapper_param(
+        params <- .check_wrapper_param(
           main_param_name = "predict_leaf_index"
           , params = params
           , alternative_kwarg_value = predleaf
         )
-        params <- lgb.check.wrapper_param(
+        params <- .check_wrapper_param(
           main_param_name = "predict_contrib"
           , params = params
           , alternative_kwarg_value = predcontrib
@@ -586,7 +586,7 @@ Booster <- R6::R6Class(
         , predcontrib
         , start_iteration
         , num_iteration
-        , lgb.params2str(params = params)
+        , .lgb_params2str(params = params)
       )
 
       private$fast_predict_config <- list(
@@ -622,7 +622,7 @@ Booster <- R6::R6Class(
     },
 
     check_null_handle = function() {
-      return(lgb.is.null.handle(private$handle))
+      return(.is_null_handle(private$handle))
     },
 
     restore_handle = function() {
@@ -959,7 +959,7 @@ predict.lgb.Booster <- function(object,
                                 params = list(),
                                 ...) {
 
-  if (!lgb.is.Booster(x = object)) {
+  if (!.is_Booster(x = object)) {
     stop("predict.lgb.Booster: object should be an ", sQuote("lgb.Booster"))
   }
 
@@ -1114,7 +1114,7 @@ lgb.configure_fast_predict <- function(model,
                                        num_iteration = NULL,
                                        type = "response",
                                        params = list()) {
-  if (!lgb.is.Booster(x = model)) {
+  if (!.is_Booster(x = model)) {
     stop("lgb.configure_fast_predict: model should be an ", sQuote("lgb.Booster"))
   }
   if (type == "class") {
@@ -1160,7 +1160,7 @@ lgb.configure_fast_predict <- function(model,
 print.lgb.Booster <- function(x, ...) {
   # nolint start
   handle <- x$.__enclos_env__$private$handle
-  handle_is_null <- lgb.is.null.handle(handle)
+  handle_is_null <- .is_null_handle(handle)
 
   if (!handle_is_null) {
     ntrees <- x$current_iter()
@@ -1316,7 +1316,7 @@ lgb.load <- function(filename = NULL, model_str = NULL) {
 #' @export
 lgb.save <- function(booster, filename, num_iteration = NULL) {
 
-  if (!lgb.is.Booster(x = booster)) {
+  if (!.is_Booster(x = booster)) {
     stop("lgb.save: booster should be an ", sQuote("lgb.Booster"))
   }
 
@@ -1372,7 +1372,7 @@ lgb.save <- function(booster, filename, num_iteration = NULL) {
 #' @export
 lgb.dump <- function(booster, num_iteration = NULL) {
 
-  if (!lgb.is.Booster(x = booster)) {
+  if (!.is_Booster(x = booster)) {
     stop("lgb.dump: booster should be an ", sQuote("lgb.Booster"))
   }
 
@@ -1430,7 +1430,7 @@ lgb.dump <- function(booster, num_iteration = NULL) {
 #' @export
 lgb.get.eval.result <- function(booster, data_name, eval_name, iters = NULL, is_err = FALSE) {
 
-  if (!lgb.is.Booster(x = booster)) {
+  if (!.is_Booster(x = booster)) {
     stop("lgb.get.eval.result: Can only use ", sQuote("lgb.Booster"), " to get eval result")
   }
 
diff --git a/R-package/R/lgb.DataProcessor.R b/R-package/R/lgb.DataProcessor.R
index fc7061945b62..c35ce4f93bd3 100644
--- a/R-package/R/lgb.DataProcessor.R
+++ b/R-package/R/lgb.DataProcessor.R
@@ -39,7 +39,7 @@ DataProcessor <- R6::R6Class(
             )
           }
           data_num_class <- length(self$factor_levels)
-          params <- lgb.check.wrapper_param(
+          params <- .check_wrapper_param(
               main_param_name = "num_class"
               , params = params
               , alternative_kwarg_value = data_num_class
diff --git a/R-package/R/lgb.Dataset.R b/R-package/R/lgb.Dataset.R
index e2892ea4bae0..2f5a488ac21c 100644
--- a/R-package/R/lgb.Dataset.R
+++ b/R-package/R/lgb.Dataset.R
@@ -55,10 +55,10 @@ Dataset <- R6::R6Class(
                           init_score = NULL) {
 
       # validate inputs early to avoid unnecessary computation
-      if (!(is.null(reference) || lgb.is.Dataset(reference))) {
+      if (!(is.null(reference) || .is_Dataset(reference))) {
           stop("lgb.Dataset: If provided, reference must be a ", sQuote("lgb.Dataset"))
       }
-      if (!(is.null(predictor) || lgb.is.Predictor(predictor))) {
+      if (!(is.null(predictor) || .is_Predictor(predictor))) {
           stop("lgb.Dataset: If provided, predictor must be a ", sQuote("lgb.Predictor"))
       }
 
@@ -135,7 +135,7 @@ Dataset <- R6::R6Class(
     construct = function() {
 
       # Check for handle null
-      if (!lgb.is.null.handle(x = private$handle)) {
+      if (!.is_null_handle(x = private$handle)) {
         return(invisible(self))
       }
 
@@ -191,7 +191,7 @@ Dataset <- R6::R6Class(
       }
 
       # Generate parameter str
-      params_str <- lgb.params2str(params = private$params)
+      params_str <- .lgb_params2str(params = private$params)
 
       # Get handle of reference dataset
       ref_handle <- NULL
@@ -277,7 +277,7 @@ Dataset <- R6::R6Class(
         )
 
       }
-      if (lgb.is.null.handle(x = handle)) {
+      if (.is_null_handle(x = handle)) {
         stop("lgb.Dataset.construct: cannot create Dataset handle")
       }
       # Setup class and private type
@@ -345,7 +345,7 @@ Dataset <- R6::R6Class(
     dim = function() {
 
       # Check for handle
-      if (!lgb.is.null.handle(x = private$handle)) {
+      if (!.is_null_handle(x = private$handle)) {
 
         num_row <- 0L
         num_col <- 0L
@@ -385,7 +385,7 @@ Dataset <- R6::R6Class(
 
     # Get number of bins for feature
     get_feature_num_bin = function(feature) {
-      if (lgb.is.null.handle(x = private$handle)) {
+      if (.is_null_handle(x = private$handle)) {
         stop("Cannot get number of bins in feature before constructing Dataset.")
       }
       if (is.character(feature)) {
@@ -409,7 +409,7 @@ Dataset <- R6::R6Class(
     get_colnames = function() {
 
       # Check for handle
-      if (!lgb.is.null.handle(x = private$handle)) {
+      if (!.is_null_handle(x = private$handle)) {
         private$colnames <- .Call(
           LGBM_DatasetGetFeatureNames_R
           , private$handle
@@ -449,7 +449,7 @@ Dataset <- R6::R6Class(
 
       # Write column names
       private$colnames <- colnames
-      if (!lgb.is.null.handle(x = private$handle)) {
+      if (!.is_null_handle(x = private$handle)) {
 
         # Merge names with tab separation
         merged_name <- paste0(as.list(private$colnames), collapse = "\t")
@@ -478,7 +478,7 @@ Dataset <- R6::R6Class(
       # Check for info name and handle
       if (is.null(private$info[[field_name]])) {
 
-        if (lgb.is.null.handle(x = private$handle)) {
+        if (.is_null_handle(x = private$handle)) {
           stop("Cannot perform Dataset$get_field() before constructing Dataset.")
         }
 
@@ -536,7 +536,7 @@ Dataset <- R6::R6Class(
       # Store information privately
       private$info[[field_name]] <- data
 
-      if (!lgb.is.null.handle(x = private$handle) && !is.null(data)) {
+      if (!.is_null_handle(x = private$handle) && !is.null(data)) {
 
         if (length(data) > 0L) {
 
@@ -583,14 +583,14 @@ Dataset <- R6::R6Class(
         return(invisible(self))
       }
       new_params <- utils::modifyList(private$params, params)
-      if (lgb.is.null.handle(x = private$handle)) {
+      if (.is_null_handle(x = private$handle)) {
         private$params <- new_params
       } else {
         tryCatch({
           .Call(
             LGBM_DatasetUpdateParamChecking_R
-            , lgb.params2str(params = private$params)
-            , lgb.params2str(params = new_params)
+            , .lgb_params2str(params = private$params)
+            , .lgb_params2str(params = new_params)
           )
           private$params <- new_params
         }, error = function(e) {
@@ -663,7 +663,7 @@ Dataset <- R6::R6Class(
           please set ", sQuote("free_raw_data = FALSE"), " when you construct lgb.Dataset")
       }
 
-      if (!lgb.is.Dataset(reference)) {
+      if (!.is_Dataset(reference)) {
         stop("set_reference: Can only use lgb.Dataset as a reference")
       }
 
@@ -711,7 +711,7 @@ Dataset <- R6::R6Class(
     get_handle = function() {
 
       # Get handle and construct if needed
-      if (lgb.is.null.handle(x = private$handle)) {
+      if (.is_null_handle(x = private$handle)) {
         self$construct()
       }
       return(private$handle)
@@ -734,7 +734,7 @@ Dataset <- R6::R6Class(
       if (!is.null(predictor)) {
 
         # Predictor is unknown
-        if (!lgb.is.Predictor(predictor)) {
+        if (!.is_Predictor(predictor)) {
           stop("set_predictor: Can only use lgb.Predictor as predictor")
         }
 
@@ -888,7 +888,7 @@ lgb.Dataset.create.valid <- function(dataset,
                                      init_score = NULL,
                                      params = list()) {
 
-  if (!lgb.is.Dataset(x = dataset)) {
+  if (!.is_Dataset(x = dataset)) {
     stop("lgb.Dataset.create.valid: input data should be an lgb.Dataset object")
   }
 
@@ -922,7 +922,7 @@ lgb.Dataset.create.valid <- function(dataset,
 #' @export
 lgb.Dataset.construct <- function(dataset) {
 
-  if (!lgb.is.Dataset(x = dataset)) {
+  if (!.is_Dataset(x = dataset)) {
     stop("lgb.Dataset.construct: input data should be an lgb.Dataset object")
   }
 
@@ -954,7 +954,7 @@ lgb.Dataset.construct <- function(dataset) {
 #' @export
 dim.lgb.Dataset <- function(x) {
 
-  if (!lgb.is.Dataset(x = x)) {
+  if (!.is_Dataset(x = x)) {
     stop("dim.lgb.Dataset: input data should be an lgb.Dataset object")
   }
 
@@ -989,7 +989,7 @@ dim.lgb.Dataset <- function(x) {
 #' @export
 dimnames.lgb.Dataset <- function(x) {
 
-  if (!lgb.is.Dataset(x = x)) {
+  if (!.is_Dataset(x = x)) {
     stop("dimnames.lgb.Dataset: input data should be an lgb.Dataset object")
   }
 
@@ -1062,7 +1062,7 @@ slice <- function(dataset, idxset) {
 #' @export
 slice.lgb.Dataset <- function(dataset, idxset) {
 
-  if (!lgb.is.Dataset(x = dataset)) {
+  if (!.is_Dataset(x = dataset)) {
     stop("slice.lgb.Dataset: input dataset should be an lgb.Dataset object")
   }
 
@@ -1110,7 +1110,7 @@ get_field <- function(dataset, field_name) {
 get_field.lgb.Dataset <- function(dataset, field_name) {
 
   # Check if dataset is not a dataset
-  if (!lgb.is.Dataset(x = dataset)) {
+  if (!.is_Dataset(x = dataset)) {
     stop("get_field.lgb.Dataset(): input dataset should be an lgb.Dataset object")
   }
 
@@ -1158,7 +1158,7 @@ set_field <- function(dataset, field_name, data) {
 #' @export
 set_field.lgb.Dataset <- function(dataset, field_name, data) {
 
-  if (!lgb.is.Dataset(x = dataset)) {
+  if (!.is_Dataset(x = dataset)) {
     stop("set_field.lgb.Dataset: input dataset should be an lgb.Dataset object")
   }
 
@@ -1189,7 +1189,7 @@ set_field.lgb.Dataset <- function(dataset, field_name, data) {
 #' @export
 lgb.Dataset.set.categorical <- function(dataset, categorical_feature) {
 
-  if (!lgb.is.Dataset(x = dataset)) {
+  if (!.is_Dataset(x = dataset)) {
     stop("lgb.Dataset.set.categorical: input dataset should be an lgb.Dataset object")
   }
 
@@ -1222,7 +1222,7 @@ lgb.Dataset.set.categorical <- function(dataset, categorical_feature) {
 #' @export
 lgb.Dataset.set.reference <- function(dataset, reference) {
 
-  if (!lgb.is.Dataset(x = dataset)) {
+  if (!.is_Dataset(x = dataset)) {
     stop("lgb.Dataset.set.reference: input dataset should be an lgb.Dataset object")
   }
 
@@ -1248,7 +1248,7 @@ lgb.Dataset.set.reference <- function(dataset, reference) {
 #' @export
 lgb.Dataset.save <- function(dataset, fname) {
 
-  if (!lgb.is.Dataset(x = dataset)) {
+  if (!.is_Dataset(x = dataset)) {
     stop("lgb.Dataset.save: input dataset should be an lgb.Dataset object")
   }
 
diff --git a/R-package/R/lgb.Predictor.R b/R-package/R/lgb.Predictor.R
index 0b7b39e2d8c2..17e0cb8f746d 100644
--- a/R-package/R/lgb.Predictor.R
+++ b/R-package/R/lgb.Predictor.R
@@ -28,7 +28,7 @@ Predictor <- R6::R6Class(
 
     # Initialize will create a starter model
     initialize = function(modelfile, params = list(), fast_predict_config = list()) {
-      private$params <- lgb.params2str(params = params)
+      private$params <- .lgb_params2str(params = params)
       handle <- NULL
 
       if (is.character(modelfile)) {
@@ -46,7 +46,7 @@ Predictor <- R6::R6Class(
         handle <- modelfile
         private$need_free_handle <- FALSE
 
-      } else if (lgb.is.Booster(modelfile)) {
+      } else if (.is_Booster(modelfile)) {
 
         handle <- modelfile$get_handle()
         private$need_free_handle <- FALSE
@@ -512,7 +512,7 @@ Predictor <- R6::R6Class(
         return(FALSE)
       }
 
-      if (lgb.is.null.handle(private$fast_predict_config$handle)) {
+      if (.is_null_handle(private$fast_predict_config$handle)) {
         warning(paste0("Model had fast CSR predict configuration, but it is inactive."
                        , " Try re-generating it through 'lgb.configure_fast_predict'."))
         return(FALSE)
@@ -527,8 +527,8 @@ Predictor <- R6::R6Class(
         private$fast_predict_config$rawscore == rawscore &&
         private$fast_predict_config$predleaf == predleaf &&
         private$fast_predict_config$predcontrib == predcontrib &&
-        lgb.equal.or.both.null(private$fast_predict_config$start_iteration, start_iteration) &&
-        lgb.equal.or.both.null(private$fast_predict_config$num_iteration, num_iteration)
+        .equal_or_both_null(private$fast_predict_config$start_iteration, start_iteration) &&
+        .equal_or_both_null(private$fast_predict_config$num_iteration, num_iteration)
       )
     }
   )
diff --git a/R-package/R/lgb.cv.R b/R-package/R/lgb.cv.R
index f81026fe673f..70901a3ff2d2 100644
--- a/R-package/R/lgb.cv.R
+++ b/R-package/R/lgb.cv.R
@@ -99,7 +99,7 @@ lgb.cv <- function(params = list()
   }
 
   # If 'data' is not an lgb.Dataset, try to construct one using 'label'
-  if (!lgb.is.Dataset(x = data)) {
+  if (!.is_Dataset(x = data)) {
     if (is.null(label)) {
       stop("'label' must be provided for lgb.cv if 'data' is not an 'lgb.Dataset'")
     }
@@ -110,27 +110,27 @@ lgb.cv <- function(params = list()
   # in `params`.
   # this ensures that the model stored with Booster$save() correctly represents
   # what was passed in
-  params <- lgb.check.wrapper_param(
+  params <- .check_wrapper_param(
     main_param_name = "verbosity"
     , params = params
     , alternative_kwarg_value = verbose
   )
-  params <- lgb.check.wrapper_param(
+  params <- .check_wrapper_param(
     main_param_name = "num_iterations"
     , params = params
     , alternative_kwarg_value = nrounds
   )
-  params <- lgb.check.wrapper_param(
+  params <- .check_wrapper_param(
     main_param_name = "metric"
     , params = params
     , alternative_kwarg_value = NULL
   )
-  params <- lgb.check.wrapper_param(
+  params <- .check_wrapper_param(
     main_param_name = "objective"
     , params = params
     , alternative_kwarg_value = obj
   )
-  params <- lgb.check.wrapper_param(
+  params <- .check_wrapper_param(
     main_param_name = "early_stopping_round"
     , params = params
     , alternative_kwarg_value = early_stopping_rounds
@@ -148,7 +148,7 @@ lgb.cv <- function(params = list()
   # (for backwards compatibility). If it is a list of functions, store
   # all of them. This makes it possible to pass any mix of strings like "auc"
   # and custom functions to eval
-  params <- lgb.check.eval(params = params, eval = eval)
+  params <- .check_eval(params = params, eval = eval)
   eval_functions <- list(NULL)
   if (is.function(eval)) {
     eval_functions <- list(eval)
@@ -166,7 +166,7 @@ lgb.cv <- function(params = list()
   # Check for boosting from a trained model
   if (is.character(init_model)) {
     predictor <- Predictor$new(modelfile = init_model)
-  } else if (lgb.is.Booster(x = init_model)) {
+  } else if (.is_Booster(x = init_model)) {
     predictor <- init_model$to_predictor()
   }
 
@@ -193,7 +193,7 @@ lgb.cv <- function(params = list()
   } else if (!is.null(data$get_colnames())) {
     cnames <- data$get_colnames()
   }
-  params[["interaction_constraints"]] <- lgb.check_interaction_constraints(
+  params[["interaction_constraints"]] <- .check_interaction_constraints(
     interaction_constraints = interaction_constraints
     , column_names = cnames
   )
@@ -232,7 +232,7 @@ lgb.cv <- function(params = list()
     }
 
     # Create folds
-    folds <- generate.cv.folds(
+    folds <- .generate_cv_folds(
       nfold = nfold
       , nrows = nrow(data)
       , stratified = stratified
@@ -245,12 +245,12 @@ lgb.cv <- function(params = list()
 
   # Add printing log callback
   if (params[["verbosity"]] > 0L && eval_freq > 0L) {
-    callbacks <- add.cb(cb_list = callbacks, cb = cb_print_evaluation(period = eval_freq))
+    callbacks <- .add_cb(cb_list = callbacks, cb = cb_print_evaluation(period = eval_freq))
   }
 
   # Add evaluation log callback
   if (record) {
-    callbacks <- add.cb(cb_list = callbacks, cb = cb_record_evaluation())
+    callbacks <- .add_cb(cb_list = callbacks, cb = cb_record_evaluation())
   }
 
   # Did user pass parameters that indicate they want to use early stopping?
@@ -282,7 +282,7 @@ lgb.cv <- function(params = list()
 
   # If user supplied early_stopping_rounds, add the early stopping callback
   if (using_early_stopping) {
-    callbacks <- add.cb(
+    callbacks <- .add_cb(
       cb_list = callbacks
       , cb = cb_early_stop(
         stopping_rounds = early_stopping_rounds
@@ -292,7 +292,7 @@ lgb.cv <- function(params = list()
     )
   }
 
-  cb <- categorize.callbacks(cb_list = callbacks)
+  cb <- .categorize_callbacks(cb_list = callbacks)
 
   # Construct booster for each fold. The data.table() code below is used to
   # guarantee that indices are sorted while keeping init_score and weight together
@@ -387,7 +387,7 @@ lgb.cv <- function(params = list()
     })
 
     # Prepare collection of evaluation results
-    merged_msg <- lgb.merge.cv.result(
+    merged_msg <- .lgb_merge_cv_result(
       msg = msg
       , showsd = showsd
     )
@@ -463,7 +463,7 @@ lgb.cv <- function(params = list()
 }
 
 # Generates random (stratified if needed) CV folds
-generate.cv.folds <- function(nfold, nrows, stratified, label, group, params) {
+.generate_cv_folds <- function(nfold, nrows, stratified, label, group, params) {
 
   # Check for group existence
   if (is.null(group)) {
@@ -476,7 +476,7 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, group, params) {
 
       y <- label[rnd_idx]
       y <- as.factor(y)
-      folds <- lgb.stratified.folds(y = y, k = nfold)
+      folds <- .lgb_stratified_folds(y = y, k = nfold)
 
     } else {
 
@@ -528,7 +528,7 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, group, params) {
 # It was borrowed from caret::createFolds and simplified
 # by always returning an unnamed list of fold indices.
 #' @importFrom stats quantile
-lgb.stratified.folds <- function(y, k) {
+.lgb_stratified_folds <- function(y, k) {
 
   # Group the numeric data based on their magnitudes
   # and sample within those groups.
@@ -594,7 +594,7 @@ lgb.stratified.folds <- function(y, k) {
   return(out)
 }
 
-lgb.merge.cv.result <- function(msg, showsd) {
+.lgb_merge_cv_result <- function(msg, showsd) {
 
   if (length(msg) == 0L) {
     stop("lgb.cv: size of cv result error")
diff --git a/R-package/R/lgb.drop_serialized.R b/R-package/R/lgb.drop_serialized.R
index bcc2480e8ccc..e53f2cafac11 100644
--- a/R-package/R/lgb.drop_serialized.R
+++ b/R-package/R/lgb.drop_serialized.R
@@ -13,7 +13,7 @@
 #' @seealso \link{lgb.restore_handle}, \link{lgb.make_serializable}.
 #' @export
 lgb.drop_serialized <- function(model) {
-  if (!lgb.is.Booster(x = model)) {
+  if (!.is_Booster(x = model)) {
     stop("lgb.drop_serialized: model should be an ", sQuote("lgb.Booster"))
   }
   model$drop_raw()
diff --git a/R-package/R/lgb.importance.R b/R-package/R/lgb.importance.R
index 5a58770553f9..27efb17392df 100644
--- a/R-package/R/lgb.importance.R
+++ b/R-package/R/lgb.importance.R
@@ -39,7 +39,7 @@
 #' @export
 lgb.importance <- function(model, percentage = TRUE) {
 
-  if (!lgb.is.Booster(x = model)) {
+  if (!.is_Booster(x = model)) {
     stop("'model' has to be an object of class lgb.Booster")
   }
 
diff --git a/R-package/R/lgb.interprete.R b/R-package/R/lgb.interprete.R
index 7de772664d8b..976315262792 100644
--- a/R-package/R/lgb.interprete.R
+++ b/R-package/R/lgb.interprete.R
@@ -86,7 +86,7 @@ lgb.interprete <- function(model,
   )
 
   for (i in seq_along(idxset)) {
-    tree_interpretation_dt_list[[i]] <- single.row.interprete(
+    tree_interpretation_dt_list[[i]] <- .single_row_interprete(
       tree_dt = tree_dt
       , num_class = num_class
       , tree_index_mat = tree_index_mat_list[[i]]
@@ -151,7 +151,7 @@ single.tree.interprete <- function(tree_dt,
 }
 
 #' @importFrom data.table := rbindlist setorder
-multiple.tree.interprete <- function(tree_dt,
+.multiple_tree_interprete <- function(tree_dt,
                                      tree_index,
                                      leaf_index) {
 
@@ -186,7 +186,7 @@ multiple.tree.interprete <- function(tree_dt,
 }
 
 #' @importFrom data.table set setnames
-single.row.interprete <- function(tree_dt, num_class, tree_index_mat, leaf_index_mat) {
+.single_row_interprete <- function(tree_dt, num_class, tree_index_mat, leaf_index_mat) {
 
   # Prepare vector list
   tree_interpretation <- vector(mode = "list", length = num_class)
@@ -194,7 +194,7 @@ single.row.interprete <- function(tree_dt, num_class, tree_index_mat, leaf_index
   # Loop throughout each class
   for (i in seq_len(num_class)) {
 
-    next_interp_dt <- multiple.tree.interprete(
+    next_interp_dt <- .multiple_tree_interprete(
       tree_dt = tree_dt
       , tree_index = tree_index_mat[, i]
       , leaf_index = leaf_index_mat[, i]
diff --git a/R-package/R/lgb.make_serializable.R b/R-package/R/lgb.make_serializable.R
index 58bdd194df4d..5a639aacb2b5 100644
--- a/R-package/R/lgb.make_serializable.R
+++ b/R-package/R/lgb.make_serializable.R
@@ -13,7 +13,7 @@
 #' @seealso \link{lgb.restore_handle}, \link{lgb.drop_serialized}.
 #' @export
 lgb.make_serializable <- function(model) {
-  if (!lgb.is.Booster(x = model)) {
+  if (!.is_Booster(x = model)) {
     stop("lgb.make_serializable: model should be an ", sQuote("lgb.Booster"))
   }
   model$save_raw()
diff --git a/R-package/R/lgb.model.dt.tree.R b/R-package/R/lgb.model.dt.tree.R
index 8b0d8d81e2e8..5d994accfa7f 100644
--- a/R-package/R/lgb.model.dt.tree.R
+++ b/R-package/R/lgb.model.dt.tree.R
@@ -62,7 +62,10 @@ lgb.model.dt.tree <- function(model, num_iteration = NULL) {
   )
 
   # Parse tree model
-  tree_list <- lapply(parsed_json_model$tree_info, single.tree.parse)
+  tree_list <- lapply(
+    X = parsed_json_model$tree_info
+    , FUN = .single_tree_parse
+  )
 
   # Combine into single data.table
   tree_dt <- data.table::rbindlist(l = tree_list, use.names = TRUE)
@@ -84,7 +87,7 @@ lgb.model.dt.tree <- function(model, num_iteration = NULL) {
 
 
 #' @importFrom data.table := data.table rbindlist
-single.tree.parse <- function(lgb_tree) {
+.single_tree_parse <- function(lgb_tree) {
 
   # Traverse tree function
   pre_order_traversal <- function(env = NULL, tree_node_leaf, current_depth = 0L, parent_index = NA_integer_) {
diff --git a/R-package/R/lgb.plot.interpretation.R b/R-package/R/lgb.plot.interpretation.R
index a88f14bf83f0..8b95371eb3c2 100644
--- a/R-package/R/lgb.plot.interpretation.R
+++ b/R-package/R/lgb.plot.interpretation.R
@@ -89,7 +89,7 @@ lgb.plot.interpretation <- function(tree_interpretation_dt,
   if (num_class == 1L) {
 
     # Only one class, plot straight away
-    multiple.tree.plot.interpretation(
+    .multiple_tree_plot_interpretation(
       tree_interpretation = tree_interpretation_dt
       , top_n = top_n
       , title = NULL
@@ -118,7 +118,7 @@ lgb.plot.interpretation <- function(tree_interpretation_dt,
         , old = names(plot_dt)
         , new = c("Feature", "Contribution")
       )
-      multiple.tree.plot.interpretation(
+      .multiple_tree_plot_interpretation(
         tree_interpretation = plot_dt
         , top_n = top_n
         , title = paste("Class", i - 1L)
@@ -131,7 +131,7 @@ lgb.plot.interpretation <- function(tree_interpretation_dt,
 }
 
 #' @importFrom graphics barplot
-multiple.tree.plot.interpretation <- function(tree_interpretation,
+.multiple_tree_plot_interpretation <- function(tree_interpretation,
                                               top_n,
                                               title,
                                               cex) {
diff --git a/R-package/R/lgb.restore_handle.R b/R-package/R/lgb.restore_handle.R
index 4de93d46c96a..0ed25ef26f3d 100644
--- a/R-package/R/lgb.restore_handle.R
+++ b/R-package/R/lgb.restore_handle.R
@@ -35,7 +35,7 @@
 #' model_new$check_null_handle()
 #' @export
 lgb.restore_handle <- function(model) {
-  if (!lgb.is.Booster(x = model)) {
+  if (!.is_Booster(x = model)) {
     stop("lgb.restore_handle: model should be an ", sQuote("lgb.Booster"))
   }
   model$restore_handle()
diff --git a/R-package/R/lgb.train.R b/R-package/R/lgb.train.R
index 20916c9844b5..6979558d22cd 100644
--- a/R-package/R/lgb.train.R
+++ b/R-package/R/lgb.train.R
@@ -63,11 +63,11 @@ lgb.train <- function(params = list(),
   if (nrounds <= 0L) {
     stop("nrounds should be greater than zero")
   }
-  if (!lgb.is.Dataset(x = data)) {
+  if (!.is_Dataset(x = data)) {
     stop("lgb.train: data must be an lgb.Dataset instance")
   }
   if (length(valids) > 0L) {
-    if (!identical(class(valids), "list") || !all(vapply(valids, lgb.is.Dataset, logical(1L)))) {
+    if (!identical(class(valids), "list") || !all(vapply(valids, .is_Dataset, logical(1L)))) {
       stop("lgb.train: valids must be a list of lgb.Dataset elements")
     }
     evnames <- names(valids)
@@ -80,27 +80,27 @@ lgb.train <- function(params = list(),
   # in `params`.
   # this ensures that the model stored with Booster$save() correctly represents
   # what was passed in
-  params <- lgb.check.wrapper_param(
+  params <- .check_wrapper_param(
     main_param_name = "verbosity"
     , params = params
     , alternative_kwarg_value = verbose
   )
-  params <- lgb.check.wrapper_param(
+  params <- .check_wrapper_param(
     main_param_name = "num_iterations"
     , params = params
     , alternative_kwarg_value = nrounds
   )
-  params <- lgb.check.wrapper_param(
+  params <- .check_wrapper_param(
     main_param_name = "metric"
     , params = params
     , alternative_kwarg_value = NULL
   )
-  params <- lgb.check.wrapper_param(
+  params <- .check_wrapper_param(
     main_param_name = "objective"
     , params = params
     , alternative_kwarg_value = obj
   )
-  params <- lgb.check.wrapper_param(
+  params <- .check_wrapper_param(
     main_param_name = "early_stopping_round"
     , params = params
     , alternative_kwarg_value = early_stopping_rounds
@@ -118,7 +118,7 @@ lgb.train <- function(params = list(),
   # (for backwards compatibility). If it is a list of functions, store
   # all of them. This makes it possible to pass any mix of strings like "auc"
   # and custom functions to eval
-  params <- lgb.check.eval(params = params, eval = eval)
+  params <- .check_eval(params = params, eval = eval)
   eval_functions <- list(NULL)
   if (is.function(eval)) {
     eval_functions <- list(eval)
@@ -136,7 +136,7 @@ lgb.train <- function(params = list(),
   # Check for boosting from a trained model
   if (is.character(init_model)) {
     predictor <- Predictor$new(modelfile = init_model)
-  } else if (lgb.is.Booster(x = init_model)) {
+  } else if (.is_Booster(x = init_model)) {
     predictor <- init_model$to_predictor()
   }
 
@@ -166,7 +166,7 @@ lgb.train <- function(params = list(),
   } else if (!is.null(data$get_colnames())) {
     cnames <- data$get_colnames()
   }
-  params[["interaction_constraints"]] <- lgb.check_interaction_constraints(
+  params[["interaction_constraints"]] <- .check_interaction_constraints(
     interaction_constraints = interaction_constraints
     , column_names = cnames
   )
@@ -212,12 +212,18 @@ lgb.train <- function(params = list(),
 
   # Add printing log callback
   if (params[["verbosity"]] > 0L && eval_freq > 0L) {
-    callbacks <- add.cb(cb_list = callbacks, cb = cb_print_evaluation(period = eval_freq))
+    callbacks <- .add_cb(
+        cb_list = callbacks
+        , cb = cb_print_evaluation(period = eval_freq)
+    )
   }
 
   # Add evaluation log callback
   if (record && length(valids) > 0L) {
-    callbacks <- add.cb(cb_list = callbacks, cb = cb_record_evaluation())
+    callbacks <- .add_cb(
+        cb_list = callbacks
+        , cb = cb_record_evaluation()
+    )
   }
 
   # Did user pass parameters that indicate they want to use early stopping?
@@ -249,7 +255,7 @@ lgb.train <- function(params = list(),
 
   # If user supplied early_stopping_rounds, add the early stopping callback
   if (using_early_stopping) {
-    callbacks <- add.cb(
+    callbacks <- .add_cb(
       cb_list = callbacks
       , cb = cb_early_stop(
         stopping_rounds = early_stopping_rounds
@@ -259,7 +265,7 @@ lgb.train <- function(params = list(),
     )
   }
 
-  cb <- categorize.callbacks(cb_list = callbacks)
+  cb <- .categorize_callbacks(cb_list = callbacks)
 
   # Construct booster with datasets
   booster <- Booster$new(params = params, train_set = data)
diff --git a/R-package/R/lightgbm.R b/R-package/R/lightgbm.R
index 711b3ef0dc38..e5df7a93fc97 100644
--- a/R-package/R/lightgbm.R
+++ b/R-package/R/lightgbm.R
@@ -184,21 +184,21 @@ lightgbm <- function(data,
   }
 
   if (is.null(num_threads)) {
-    num_threads <- lgb.get.default.num.threads()
+    num_threads <- .get_default_num_threads()
   }
-  params <- lgb.check.wrapper_param(
+  params <- .check_wrapper_param(
     main_param_name = "num_threads"
     , params = params
     , alternative_kwarg_value = num_threads
   )
-  params <- lgb.check.wrapper_param(
+  params <- .check_wrapper_param(
     main_param_name = "verbosity"
     , params = params
     , alternative_kwarg_value = verbose
   )
 
   # Process factors as labels and auto-determine objective
-  if (!lgb.is.Dataset(data)) {
+  if (!.is_Dataset(data)) {
     data_processor <- DataProcessor$new()
     temp <- data_processor$process_label(
         label = label
@@ -220,7 +220,7 @@ lightgbm <- function(data,
   dtrain <- data
 
   # Check whether data is lgb.Dataset, if not then create lgb.Dataset manually
-  if (!lgb.is.Dataset(x = dtrain)) {
+  if (!.is_Dataset(x = dtrain)) {
     dtrain <- lgb.Dataset(data = data, label = label, weight = weights, init_score = init_score)
   }
 
diff --git a/R-package/R/saveRDS.lgb.Booster.R b/R-package/R/saveRDS.lgb.Booster.R
index 5d3af097301f..d75056e69734 100644
--- a/R-package/R/saveRDS.lgb.Booster.R
+++ b/R-package/R/saveRDS.lgb.Booster.R
@@ -57,7 +57,7 @@ saveRDS.lgb.Booster <- function(object,
 
   warning("'saveRDS.lgb.Booster' is deprecated and will be removed in a future release. Use saveRDS() instead.")
 
-  if (!lgb.is.Booster(x = object)) {
+  if (!.is_Booster(x = object)) {
     stop("saveRDS.lgb.Booster: object should be an ", sQuote("lgb.Booster"))
   }
 
diff --git a/R-package/R/utils.R b/R-package/R/utils.R
index c9ba780316df..6fc59cfcbc4a 100644
--- a/R-package/R/utils.R
+++ b/R-package/R/utils.R
@@ -1,16 +1,16 @@
-lgb.is.Booster <- function(x) {
+.is_Booster <- function(x) {
   return(all(c("R6", "lgb.Booster") %in% class(x)))  # nolint: class_equals
 }
 
-lgb.is.Dataset <- function(x) {
+.is_Dataset <- function(x) {
   return(all(c("R6", "lgb.Dataset") %in% class(x)))  # nolint: class_equals
 }
 
-lgb.is.Predictor <- function(x) {
+.is_Predictor <- function(x) {
   return(all(c("R6", "lgb.Predictor") %in% class(x)))  # nolint: class_equals
 }
 
-lgb.is.null.handle <- function(x) {
+.is_null_handle <- function(x) {
   if (is.null(x)) {
     return(TRUE)
   }
@@ -19,7 +19,7 @@ lgb.is.null.handle <- function(x) {
   )
 }
 
-lgb.params2str <- function(params) {
+.lgb_params2str <- function(params) {
 
   if (!identical(class(params), "list")) {
     stop("params must be a list")
@@ -59,7 +59,7 @@ lgb.params2str <- function(params) {
 
 }
 
-lgb.check_interaction_constraints <- function(interaction_constraints, column_names) {
+.check_interaction_constraints <- function(interaction_constraints, column_names) {
 
   # Convert interaction constraints to feature numbers
   string_constraints <- list()
@@ -129,7 +129,7 @@ lgb.check_interaction_constraints <- function(interaction_constraints, column_na
 #     This has to account for the fact that `eval` could be a character vector,
 #     a function, a list of functions, or a list with a mix of strings and
 #     functions
-lgb.check.eval <- function(params, eval) {
+.check_eval <- function(params, eval) {
 
   if (is.null(params$metric)) {
     params$metric <- list()
@@ -194,7 +194,7 @@ lgb.check.eval <- function(params, eval) {
 # [return]
 #     params with num_iterations set to the chosen value, and other aliases
 #     of num_iterations removed
-lgb.check.wrapper_param <- function(main_param_name, params, alternative_kwarg_value) {
+.check_wrapper_param <- function(main_param_name, params, alternative_kwarg_value) {
 
   aliases <- .PARAMETER_ALIASES()[[main_param_name]]
   aliases_provided <- aliases[aliases %in% names(params)]
@@ -225,7 +225,7 @@ lgb.check.wrapper_param <- function(main_param_name, params, alternative_kwarg_v
 }
 
 #' @importFrom parallel detectCores
-lgb.get.default.num.threads <- function() {
+.get_default_num_threads <- function() {
   if (requireNamespace("RhpcBLASctl", quietly = TRUE)) {  # nolint: undesirable_function
     return(RhpcBLASctl::get_num_cores())
   } else {
@@ -247,7 +247,7 @@ lgb.get.default.num.threads <- function() {
   }
 }
 
-lgb.equal.or.both.null <- function(a, b) {
+.equal_or_both_null <- function(a, b) {
   if (is.null(a)) {
     if (!is.null(b)) {
       return(FALSE)
diff --git a/R-package/tests/testthat/test_Predictor.R b/R-package/tests/testthat/test_Predictor.R
index 90be1d08cf67..192171c915bf 100644
--- a/R-package/tests/testthat/test_Predictor.R
+++ b/R-package/tests/testthat/test_Predictor.R
@@ -17,16 +17,16 @@ test_that("Predictor$finalize() should not fail", {
     bst$save_model(filename = model_file)
     predictor <- Predictor$new(modelfile = model_file)
 
-    expect_true(lgb.is.Predictor(predictor))
+    expect_true(.is_Predictor(predictor))
 
-    expect_false(lgb.is.null.handle(predictor$.__enclos_env__$private$handle))
+    expect_false(.is_null_handle(predictor$.__enclos_env__$private$handle))
 
     predictor$finalize()
-    expect_true(lgb.is.null.handle(predictor$.__enclos_env__$private$handle))
+    expect_true(.is_null_handle(predictor$.__enclos_env__$private$handle))
 
     # calling finalize() a second time shouldn't cause any issues
     predictor$finalize()
-    expect_true(lgb.is.null.handle(predictor$.__enclos_env__$private$handle))
+    expect_true(.is_null_handle(predictor$.__enclos_env__$private$handle))
 })
 
 test_that("predictions do not fail for integer input", {
@@ -79,7 +79,7 @@ test_that("start_iteration works correctly", {
         , valids = list("test" = dtest)
         , early_stopping_rounds = 2L
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
     pred1 <- predict(bst, newdata = test$data, type = "raw")
     pred_contrib1 <- predict(bst, test$data, type = "contrib")
     pred2 <- rep(0.0, length(pred1))
diff --git a/R-package/tests/testthat/test_basic.R b/R-package/tests/testthat/test_basic.R
index 9b84017476a7..75abd26dd152 100644
--- a/R-package/tests/testthat/test_basic.R
+++ b/R-package/tests/testthat/test_basic.R
@@ -1094,7 +1094,7 @@ test_that("lgb.train() works as expected with sparse features", {
     , nrounds = nrounds
   )
 
-  expect_true(lgb.is.Booster(bst))
+  expect_true(.is_Booster(bst))
   expect_equal(bst$current_iter(), nrounds)
   parsed_model <- jsonlite::fromJSON(bst$dump_model())
   expect_equal(parsed_model$objective, "binary sigmoid:1")
@@ -1816,7 +1816,7 @@ test_that("lgb.train() supports non-ASCII feature names", {
     )
     , colnames = feature_names
   )
-  expect_true(lgb.is.Booster(bst))
+  expect_true(.is_Booster(bst))
   dumped_model <- jsonlite::fromJSON(bst$dump_model())
 
   # UTF-8 strings are not well-supported on Windows
@@ -2522,7 +2522,7 @@ test_that("lgb.train() fit on linearly-relatead data improves when using linear
     , params = params
     , valids = list("train" = dtrain)
   )
-  expect_true(lgb.is.Booster(bst))
+  expect_true(.is_Booster(bst))
 
   dtrain <- .new_dataset()
   bst_linear <- lgb.train(
@@ -2531,7 +2531,7 @@ test_that("lgb.train() fit on linearly-relatead data improves when using linear
     , params = utils::modifyList(params, list(linear_tree = TRUE))
     , valids = list("train" = dtrain)
   )
-  expect_true(lgb.is.Booster(bst_linear))
+  expect_true(.is_Booster(bst_linear))
 
   bst_last_mse <- bst$record_evals[["train"]][["l2"]][["eval"]][[10L]]
   bst_lin_last_mse <- bst_linear$record_evals[["train"]][["l2"]][["eval"]][[10L]]
@@ -2599,7 +2599,7 @@ test_that("lgb.train() works with linear learners even if Dataset has missing va
     , params = params
     , valids = list("train" = dtrain)
   )
-  expect_true(lgb.is.Booster(bst))
+  expect_true(.is_Booster(bst))
 
   dtrain <- .new_dataset()
   bst_linear <- lgb.train(
@@ -2608,7 +2608,7 @@ test_that("lgb.train() works with linear learners even if Dataset has missing va
     , params = utils::modifyList(params, list(linear_tree = TRUE))
     , valids = list("train" = dtrain)
   )
-  expect_true(lgb.is.Booster(bst_linear))
+  expect_true(.is_Booster(bst_linear))
 
   bst_last_mse <- bst$record_evals[["train"]][["l2"]][["eval"]][[10L]]
   bst_lin_last_mse <- bst_linear$record_evals[["train"]][["l2"]][["eval"]][[10L]]
@@ -2649,7 +2649,7 @@ test_that("lgb.train() works with linear learners, bagging, and a Dataset that h
     , params = params
     , valids = list("train" = dtrain)
   )
-  expect_true(lgb.is.Booster(bst))
+  expect_true(.is_Booster(bst))
 
   dtrain <- .new_dataset()
   bst_linear <- lgb.train(
@@ -2658,7 +2658,7 @@ test_that("lgb.train() works with linear learners, bagging, and a Dataset that h
     , params = utils::modifyList(params, list(linear_tree = TRUE))
     , valids = list("train" = dtrain)
   )
-  expect_true(lgb.is.Booster(bst_linear))
+  expect_true(.is_Booster(bst_linear))
 
   bst_last_mse <- bst$record_evals[["train"]][["l2"]][["eval"]][[10L]]
   bst_lin_last_mse <- bst_linear$record_evals[["train"]][["l2"]][["eval"]][[10L]]
@@ -2699,7 +2699,7 @@ test_that("lgb.train() works with linear learners and data where a feature has o
     , nrounds = 10L
     , params = utils::modifyList(params, list(linear_tree = TRUE))
   )
-  expect_true(lgb.is.Booster(bst_linear))
+  expect_true(.is_Booster(bst_linear))
 })
 
 test_that("lgb.train() works with linear learners when Dataset has categorical features", {
@@ -2732,7 +2732,7 @@ test_that("lgb.train() works with linear learners when Dataset has categorical f
     , params = params
     , valids = list("train" = dtrain)
   )
-  expect_true(lgb.is.Booster(bst))
+  expect_true(.is_Booster(bst))
 
   dtrain <- .new_dataset()
   bst_linear <- lgb.train(
@@ -2741,7 +2741,7 @@ test_that("lgb.train() works with linear learners when Dataset has categorical f
     , params = utils::modifyList(params, list(linear_tree = TRUE))
     , valids = list("train" = dtrain)
   )
-  expect_true(lgb.is.Booster(bst_linear))
+  expect_true(.is_Booster(bst_linear))
 
   bst_last_mse <- bst$record_evals[["train"]][["l2"]][["eval"]][[10L]]
   bst_lin_last_mse <- bst_linear$record_evals[["train"]][["l2"]][["eval"]][[10L]]
diff --git a/R-package/tests/testthat/test_dataset.R b/R-package/tests/testthat/test_dataset.R
index 401d1babf5e9..587d5154a581 100644
--- a/R-package/tests/testthat/test_dataset.R
+++ b/R-package/tests/testthat/test_dataset.R
@@ -206,7 +206,7 @@ test_that("lgb.Dataset: Dataset should be able to construct from matrix and retu
     , rawData
     , nrow(rawData)
     , ncol(rawData)
-    , lightgbm:::lgb.params2str(params = list())
+    , lightgbm:::.lgb_params2str(params = list())
     , ref_handle
   )
   expect_true(methods::is(handle, "externalptr"))
@@ -322,7 +322,7 @@ test_that("Dataset$update_parameters() does nothing for empty inputs", {
   res <- ds$update_params(
     params = list()
   )
-  expect_true(lgb.is.Dataset(res))
+  expect_true(.is_Dataset(res))
 
   new_params <- ds$get_params()
   expect_identical(new_params, initial_params)
@@ -343,7 +343,7 @@ test_that("Dataset$update_params() works correctly for recognized Dataset parame
   res <- ds$update_params(
     params = new_params
   )
-  expect_true(lgb.is.Dataset(res))
+  expect_true(.is_Dataset(res))
 
   updated_params <- ds$get_params()
   for (param_name in names(new_params)) {
@@ -356,17 +356,17 @@ test_that("Dataset$finalize() should not fail on an already-finalized Dataset",
     data = test_data
     , label = test_label
   )
-  expect_true(lgb.is.null.handle(dtest$.__enclos_env__$private$handle))
+  expect_true(.is_null_handle(dtest$.__enclos_env__$private$handle))
 
   dtest$construct()
-  expect_false(lgb.is.null.handle(dtest$.__enclos_env__$private$handle))
+  expect_false(.is_null_handle(dtest$.__enclos_env__$private$handle))
 
   dtest$finalize()
-  expect_true(lgb.is.null.handle(dtest$.__enclos_env__$private$handle))
+  expect_true(.is_null_handle(dtest$.__enclos_env__$private$handle))
 
   # calling finalize() a second time shouldn't cause any issues
   dtest$finalize()
-  expect_true(lgb.is.null.handle(dtest$.__enclos_env__$private$handle))
+  expect_true(.is_null_handle(dtest$.__enclos_env__$private$handle))
 })
 
 test_that("lgb.Dataset: should be able to run lgb.train() immediately after using lgb.Dataset() on a file", {
@@ -401,7 +401,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin
     , data = dtest_read_in
   )
 
-  expect_true(lgb.is.Booster(x = bst))
+  expect_true(.is_Booster(x = bst))
 })
 
 test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using lgb.Dataset() on a file", {
diff --git a/R-package/tests/testthat/test_learning_to_rank.R b/R-package/tests/testthat/test_learning_to_rank.R
index b4ebe7bd67c3..e99aff44ceb3 100644
--- a/R-package/tests/testthat/test_learning_to_rank.R
+++ b/R-package/tests/testthat/test_learning_to_rank.R
@@ -25,7 +25,7 @@ test_that("learning-to-rank with lgb.train() works as expected", {
         , data = dtrain
         , nrounds = 10L
     )
-    expect_true(lgb.is.Booster(model))
+    expect_true(.is_Booster(model))
 
     dumped_model <- jsonlite::fromJSON(
         model$dump_model()
diff --git a/R-package/tests/testthat/test_lgb.Booster.R b/R-package/tests/testthat/test_lgb.Booster.R
index 7ebb236cd923..c1fc02630c13 100644
--- a/R-package/tests/testthat/test_lgb.Booster.R
+++ b/R-package/tests/testthat/test_lgb.Booster.R
@@ -11,16 +11,16 @@ test_that("Booster$finalize() should not fail", {
         , verbose = .LGB_VERBOSITY
         , nrounds = 3L
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
 
-    expect_false(lgb.is.null.handle(bst$.__enclos_env__$private$handle))
+    expect_false(.is_null_handle(bst$.__enclos_env__$private$handle))
 
     bst$finalize()
-    expect_true(lgb.is.null.handle(bst$.__enclos_env__$private$handle))
+    expect_true(.is_null_handle(bst$.__enclos_env__$private$handle))
 
     # calling finalize() a second time shouldn't cause any issues
     bst$finalize()
-    expect_true(lgb.is.null.handle(bst$.__enclos_env__$private$handle))
+    expect_true(.is_null_handle(bst$.__enclos_env__$private$handle))
 })
 
 test_that("lgb.get.eval.result() should throw an informative error if booster is not an lgb.Booster", {
@@ -188,7 +188,7 @@ test_that("Loading a Booster from a text file works", {
         , params = params
         , nrounds = 2L
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
 
     pred <- predict(bst, test$data)
     model_file <- tempfile(fileext = ".model")
@@ -232,7 +232,7 @@ test_that("boosters with linear models at leaves can be written to text file and
         , params = params
         , verbose = .LGB_VERBOSITY
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
 
     # save predictions, then write the model to a file and destroy it in R
     preds <- predict(bst, X)
@@ -269,7 +269,7 @@ test_that("Loading a Booster from a string works", {
         )
         , nrounds = 2L
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
 
     pred <- predict(bst, test$data)
     model_string <- bst$save_model_to_string()
@@ -376,7 +376,7 @@ test_that("If a string and a file are both passed to lgb.load() the file is used
         )
         , nrounds = 2L
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
 
     pred <- predict(bst, test$data)
     model_file <- tempfile(fileext = ".model")
@@ -411,7 +411,7 @@ test_that("Creating a Booster from a Dataset should work", {
         ),
         train_set = dtrain
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
     expect_equal(bst$current_iter(), 0L)
     expect_true(is.na(bst$best_score))
     expect_true(all(bst$predict(agaricus.train$data) == 0.5))
@@ -446,10 +446,10 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w
             , num_threads = .LGB_MAX_THREADS
         )
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
     expect_equal(bst$current_iter(), nrounds)
     expect_equal(bst$eval_train()[[1L]][["value"]], 0.1115352)
-    expect_true(lgb.is.Booster(bst_from_ds))
+    expect_true(.is_Booster(bst_from_ds))
     expect_equal(bst_from_ds$current_iter(), nrounds)
     expect_equal(bst_from_ds$eval_train()[[1L]][["value"]], 5.65704892)
     dumped_model <- jsonlite::fromJSON(bst$dump_model())
@@ -531,7 +531,7 @@ test_that("Booster$rollback_one_iter() should work as expected", {
         , nrounds = nrounds
     )
     expect_equal(bst$current_iter(), nrounds)
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
     logloss <- bst$eval_train()[[1L]][["value"]]
     expect_equal(logloss, 0.01904786)
 
@@ -539,7 +539,7 @@ test_that("Booster$rollback_one_iter() should work as expected", {
 
     # rollback_one_iter() should return a booster and modify the original
     # booster in place
-    expect_true(lgb.is.Booster(x))
+    expect_true(.is_Booster(x))
     expect_equal(bst$current_iter(), nrounds - 1L)
 
     # score should now come from the model as of 4 iterations
@@ -565,7 +565,7 @@ test_that("Booster$update() passing a train_set works as expected", {
         )
         , nrounds = nrounds
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
     expect_equal(bst$current_iter(), nrounds)
     bst$update(
         train_set = Dataset$new(
@@ -574,7 +574,7 @@ test_that("Booster$update() passing a train_set works as expected", {
             , params = list(verbose = .LGB_VERBOSITY)
         )
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
     expect_equal(bst$current_iter(), nrounds + 1L)
 
     # train with 3 rounds directly
@@ -590,7 +590,7 @@ test_that("Booster$update() passing a train_set works as expected", {
         )
         , nrounds = nrounds +  1L
     )
-    expect_true(lgb.is.Booster(bst2))
+    expect_true(.is_Booster(bst2))
     expect_equal(bst2$current_iter(), nrounds +  1L)
 
     # model with 2 rounds + 1 update should be identical to 3 rounds
@@ -716,7 +716,7 @@ test_that("Saving a model with different feature importance types works", {
         )
         , nrounds = 2L
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
 
     .feat_importance_from_string <- function(model_string) {
         file_lines <- strsplit(model_string, "\n", fixed = TRUE)[[1L]]
@@ -772,7 +772,7 @@ test_that("Saving a model with unknown importance type fails", {
         )
         , nrounds = 2L
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
 
     UNSUPPORTED_IMPORTANCE <- 2L
     expect_error({
@@ -1372,7 +1372,7 @@ test_that("boosters with linear models at leaves work with saveRDS.lgb.Booster a
         , nrounds = 10L
         , params = params
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
 
     # save predictions, then write the model to a file and destroy it in R
     preds <- predict(bst, X)
@@ -1412,7 +1412,7 @@ test_that("boosters with linear models at leaves can be written to RDS and re-lo
         , nrounds = 10L
         , params = params
     )
-    expect_true(lgb.is.Booster(bst))
+    expect_true(.is_Booster(bst))
 
     # save predictions, then write the model to a file and destroy it in R
     preds <- predict(bst, X)
diff --git a/R-package/tests/testthat/test_utils.R b/R-package/tests/testthat/test_utils.R
index 4ab05e075ae3..1761f42c426c 100644
--- a/R-package/tests/testthat/test_utils.R
+++ b/R-package/tests/testthat/test_utils.R
@@ -1,12 +1,12 @@
-test_that("lgb.params2str() works as expected for empty lists", {
-    out_str <- lgb.params2str(
+test_that(".lgb_params2str() works as expected for empty lists", {
+    out_str <- .lgb_params2str(
         params = list()
     )
     expect_identical(class(out_str), "character")
     expect_equal(out_str, "")
 })
 
-test_that("lgb.params2str() works as expected for a key in params with multiple different-length elements", {
+test_that(".lgb_params2str() works as expected for a key in params with multiple different-length elements", {
     metrics <- c("a", "ab", "abc", "abcdefg")
     params <- list(
         objective = "magic"
@@ -14,7 +14,7 @@ test_that("lgb.params2str() works as expected for a key in params with multiple
         , nrounds = 10L
         , learning_rate = 0.0000001
     )
-    out_str <- lgb.params2str(
+    out_str <- .lgb_params2str(
         params = params
     )
     expect_identical(class(out_str), "character")
@@ -24,8 +24,8 @@ test_that("lgb.params2str() works as expected for a key in params with multiple
     )
 })
 
-test_that("lgb.params2str() passes through duplicated params", {
-    out_str <- lgb.params2str(
+test_that(".lgb_params2str() passes through duplicated params", {
+    out_str <- .lgb_params2str(
         params = list(
             objective = "regression"
             , bagging_fraction = 0.8
@@ -35,8 +35,8 @@ test_that("lgb.params2str() passes through duplicated params", {
     expect_equal(out_str, "objective=regression bagging_fraction=0.8 bagging_fraction=0.5")
 })
 
-test_that("lgb.check.eval works as expected with no metric", {
-    params <- lgb.check.eval(
+test_that(".check_eval works as expected with no metric", {
+    params <- .check_eval(
         params = list(device = "cpu")
         , eval = "binary_error"
     )
@@ -44,8 +44,8 @@ test_that("lgb.check.eval works as expected with no metric", {
     expect_identical(params[["metric"]], list("binary_error"))
 })
 
-test_that("lgb.check.eval adds eval to metric in params", {
-    params <- lgb.check.eval(
+test_that(".check_eval adds eval to metric in params", {
+    params <- .check_eval(
         params = list(metric = "auc")
         , eval = "binary_error"
     )
@@ -53,8 +53,8 @@ test_that("lgb.check.eval adds eval to metric in params", {
     expect_identical(params[["metric"]], list("auc", "binary_error"))
 })
 
-test_that("lgb.check.eval adds eval to metric in params if two evaluation names are provided", {
-    params <- lgb.check.eval(
+test_that(".check_eval adds eval to metric in params if two evaluation names are provided", {
+    params <- .check_eval(
         params = list(metric = "auc")
         , eval = c("binary_error", "binary_logloss")
     )
@@ -62,8 +62,8 @@ test_that("lgb.check.eval adds eval to metric in params if two evaluation names
     expect_identical(params[["metric"]], list("auc", "binary_error", "binary_logloss"))
 })
 
-test_that("lgb.check.eval adds eval to metric in params if a list is provided", {
-    params <- lgb.check.eval(
+test_that(".check_eval adds eval to metric in params if a list is provided", {
+    params <- .check_eval(
         params = list(metric = "auc")
         , eval = list("binary_error", "binary_logloss")
     )
@@ -71,8 +71,8 @@ test_that("lgb.check.eval adds eval to metric in params if a list is provided",
     expect_identical(params[["metric"]], list("auc", "binary_error", "binary_logloss"))
 })
 
-test_that("lgb.check.eval drops duplicate metrics and preserves order", {
-    params <- lgb.check.eval(
+test_that(".check_eval drops duplicate metrics and preserves order", {
+    params <- .check_eval(
         params = list(metric = "l1")
         , eval = list("l2", "rmse", "l1", "rmse")
     )
@@ -80,9 +80,9 @@ test_that("lgb.check.eval drops duplicate metrics and preserves order", {
     expect_identical(params[["metric"]], list("l1", "l2", "rmse"))
 })
 
-test_that("lgb.check.wrapper_param() uses passed-in keyword arg if no alias found in params", {
+test_that(".check_wrapper_param() uses passed-in keyword arg if no alias found in params", {
     kwarg_val <- sample(seq_len(100L), size = 1L)
-    params <- lgb.check.wrapper_param(
+    params <- .check_wrapper_param(
         main_param_name = "num_iterations"
         , params = list()
         , alternative_kwarg_value = kwarg_val
@@ -90,10 +90,10 @@ test_that("lgb.check.wrapper_param() uses passed-in keyword arg if no alias foun
     expect_equal(params[["num_iterations"]], kwarg_val)
 })
 
-test_that("lgb.check.wrapper_param() prefers main parameter to alias and keyword arg", {
+test_that(".check_wrapper_param() prefers main parameter to alias and keyword arg", {
     num_iterations <- sample(seq_len(100L), size = 1L)
     kwarg_val <- sample(seq_len(100L), size = 1L)
-    params <- lgb.check.wrapper_param(
+    params <- .check_wrapper_param(
         main_param_name = "num_iterations"
         , params = list(
             num_iterations = num_iterations
@@ -108,11 +108,11 @@ test_that("lgb.check.wrapper_param() prefers main parameter to alias and keyword
     expect_identical(params, list(num_iterations = num_iterations))
 })
 
-test_that("lgb.check.wrapper_param() prefers alias to keyword arg", {
+test_that(".check_wrapper_param() prefers alias to keyword arg", {
     n_estimators <- sample(seq_len(100L), size = 1L)
     num_tree <- sample(seq_len(100L), size = 1L)
     kwarg_val <- sample(seq_len(100L), size = 1L)
-    params <- lgb.check.wrapper_param(
+    params <- .check_wrapper_param(
         main_param_name = "num_iterations"
         , params = list(
             num_tree = num_tree
@@ -124,7 +124,7 @@ test_that("lgb.check.wrapper_param() prefers alias to keyword arg", {
     expect_identical(params, list(num_iterations = num_tree))
 
     # switching the order shouldn't switch which one is chosen
-    params2 <- lgb.check.wrapper_param(
+    params2 <- .check_wrapper_param(
         main_param_name = "num_iterations"
         , params = list(
             n_estimators = n_estimators
@@ -136,14 +136,14 @@ test_that("lgb.check.wrapper_param() prefers alias to keyword arg", {
     expect_identical(params2, list(num_iterations = num_tree))
 })
 
-test_that("lgb.equal.or.both.null produces expected results", {
-    expect_true(lgb.equal.or.both.null(NULL, NULL))
-    expect_false(lgb.equal.or.both.null(1.0, NULL))
-    expect_false(lgb.equal.or.both.null(NULL, 1.0))
-    expect_true(lgb.equal.or.both.null(1.0, 1.0))
-    expect_true(lgb.equal.or.both.null(1.0, 1L))
-    expect_false(lgb.equal.or.both.null(NA, NULL))
-    expect_false(lgb.equal.or.both.null(NULL, NA))
-    expect_false(lgb.equal.or.both.null(10.0, 1L))
-    expect_true(lgb.equal.or.both.null(0L, 0L))
+test_that(".equal_or_both_null produces expected results", {
+    expect_true(.equal_or_both_null(NULL, NULL))
+    expect_false(.equal_or_both_null(1.0, NULL))
+    expect_false(.equal_or_both_null(NULL, 1.0))
+    expect_true(.equal_or_both_null(1.0, 1.0))
+    expect_true(.equal_or_both_null(1.0, 1L))
+    expect_false(.equal_or_both_null(NA, NULL))
+    expect_false(.equal_or_both_null(NULL, NA))
+    expect_false(.equal_or_both_null(10.0, 1L))
+    expect_true(.equal_or_both_null(0L, 0L))
 })

From eb0ae49449959f18895a5ccf69fb34900ca1c50c Mon Sep 17 00:00:00 2001
From: James Lamb <jaylamb20@gmail.com>
Date: Wed, 8 Nov 2023 22:54:27 -0600
Subject: [PATCH 2/3] replace .lgb_params2str() with .params2str()

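As a point of reference, here is a minimal sketch of the renamed helper's behavior. It is illustrative only and simply mirrors the existing unit tests in R-package/tests/testthat/test_utils.R (empty params and duplicated keys); it is not a new API.

    # assumed illustrative usage; .params2str() is internal, hence lightgbm:::
    lightgbm:::.params2str(params = list())
    #> ""

    lightgbm:::.params2str(
        params = list(
            objective = "regression"
            , bagging_fraction = 0.8
            , bagging_fraction = 0.5
        )
    )
    #> "objective=regression bagging_fraction=0.8 bagging_fraction=0.5"
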
---
 R-package/R/lgb.Booster.R               |  6 +++---
 R-package/R/lgb.Dataset.R               |  6 +++---
 R-package/R/lgb.Predictor.R             |  2 +-
 R-package/R/utils.R                     |  2 +-
 R-package/tests/testthat/test_dataset.R |  2 +-
 R-package/tests/testthat/test_utils.R   | 12 ++++++------
 6 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/R-package/R/lgb.Booster.R b/R-package/R/lgb.Booster.R
index e5b4c8001d40..99231cec7f73 100644
--- a/R-package/R/lgb.Booster.R
+++ b/R-package/R/lgb.Booster.R
@@ -36,7 +36,7 @@ Booster <- R6::R6Class(
         }
         train_set_handle <- train_set$.__enclos_env__$private$get_handle()
         params <- utils::modifyList(params, train_set$get_params())
-        params_str <- .lgb_params2str(params = params)
+        params_str <- .params2str(params = params)
         # Store booster handle
         handle <- .Call(
           LGBM_BoosterCreate_R
@@ -167,7 +167,7 @@ Booster <- R6::R6Class(
         params <- utils::modifyList(self$params, params)
       }
 
-      params_str <- .lgb_params2str(params = params)
+      params_str <- .params2str(params = params)
 
       self$restore_handle()
 
@@ -586,7 +586,7 @@ Booster <- R6::R6Class(
         , predcontrib
         , start_iteration
         , num_iteration
-        , .lgb_params2str(params = params)
+        , .params2str(params = params)
       )
 
       private$fast_predict_config <- list(
diff --git a/R-package/R/lgb.Dataset.R b/R-package/R/lgb.Dataset.R
index 2f5a488ac21c..ddc338d2cae3 100644
--- a/R-package/R/lgb.Dataset.R
+++ b/R-package/R/lgb.Dataset.R
@@ -191,7 +191,7 @@ Dataset <- R6::R6Class(
       }
 
       # Generate parameter str
-      params_str <- .lgb_params2str(params = private$params)
+      params_str <- .params2str(params = private$params)
 
       # Get handle of reference dataset
       ref_handle <- NULL
@@ -589,8 +589,8 @@ Dataset <- R6::R6Class(
         tryCatch({
           .Call(
             LGBM_DatasetUpdateParamChecking_R
-            , .lgb_params2str(params = private$params)
-            , .lgb_params2str(params = new_params)
+            , .params2str(params = private$params)
+            , .params2str(params = new_params)
           )
           private$params <- new_params
         }, error = function(e) {
diff --git a/R-package/R/lgb.Predictor.R b/R-package/R/lgb.Predictor.R
index 17e0cb8f746d..3a411efd75ba 100644
--- a/R-package/R/lgb.Predictor.R
+++ b/R-package/R/lgb.Predictor.R
@@ -28,7 +28,7 @@ Predictor <- R6::R6Class(
 
     # Initialize will create a starter model
     initialize = function(modelfile, params = list(), fast_predict_config = list()) {
-      private$params <- .lgb_params2str(params = params)
+      private$params <- .params2str(params = params)
       handle <- NULL
 
       if (is.character(modelfile)) {
diff --git a/R-package/R/utils.R b/R-package/R/utils.R
index 6fc59cfcbc4a..1ac6f197ca77 100644
--- a/R-package/R/utils.R
+++ b/R-package/R/utils.R
@@ -19,7 +19,7 @@
   )
 }
 
-.lgb_params2str <- function(params) {
+.params2str <- function(params) {
 
   if (!identical(class(params), "list")) {
     stop("params must be a list")
diff --git a/R-package/tests/testthat/test_dataset.R b/R-package/tests/testthat/test_dataset.R
index 587d5154a581..a8585baa2621 100644
--- a/R-package/tests/testthat/test_dataset.R
+++ b/R-package/tests/testthat/test_dataset.R
@@ -206,7 +206,7 @@ test_that("lgb.Dataset: Dataset should be able to construct from matrix and retu
     , rawData
     , nrow(rawData)
     , ncol(rawData)
-    , lightgbm:::.lgb_params2str(params = list())
+    , lightgbm:::.params2str(params = list())
     , ref_handle
   )
   expect_true(methods::is(handle, "externalptr"))
diff --git a/R-package/tests/testthat/test_utils.R b/R-package/tests/testthat/test_utils.R
index 1761f42c426c..898aed9b0915 100644
--- a/R-package/tests/testthat/test_utils.R
+++ b/R-package/tests/testthat/test_utils.R
@@ -1,12 +1,12 @@
-test_that(".lgb_params2str() works as expected for empty lists", {
-    out_str <- .lgb_params2str(
+test_that(".params2str() works as expected for empty lists", {
+    out_str <- .params2str(
         params = list()
     )
     expect_identical(class(out_str), "character")
     expect_equal(out_str, "")
 })
 
-test_that(".lgb_params2str() works as expected for a key in params with multiple different-length elements", {
+test_that(".params2str() works as expected for a key in params with multiple different-length elements", {
     metrics <- c("a", "ab", "abc", "abcdefg")
     params <- list(
         objective = "magic"
@@ -14,7 +14,7 @@ test_that(".lgb_params2str() works as expected for a key in params with multiple
         , nrounds = 10L
         , learning_rate = 0.0000001
     )
-    out_str <- .lgb_params2str(
+    out_str <- .params2str(
         params = params
     )
     expect_identical(class(out_str), "character")
@@ -24,8 +24,8 @@ test_that(".lgb_params2str() works as expected for a key in params with multiple
     )
 })
 
-test_that(".lgb_params2str() passes through duplicated params", {
-    out_str <- .lgb_params2str(
+test_that(".params2str() passes through duplicated params", {
+    out_str <- .params2str(
         params = list(
             objective = "regression"
             , bagging_fraction = 0.8

From 078a5fa88294876ec8f7f4cefd1a37a1c4b9bdf3 Mon Sep 17 00:00:00 2001
From: James Lamb <jaylamb20@gmail.com>
Date: Thu, 9 Nov 2023 11:01:41 -0600
Subject: [PATCH 3/3] Apply suggestions from code review
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

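A minimal sketch of how the renamed fold helper is called inside lgb.cv(), for context on this review change. It is illustrative only: `label`, `rnd_idx`, and `nfold` are assumed to come from the surrounding function, exactly as in the hunk below.

    # stratify the label into `nfold` roughly balanced folds;
    # .stratified_folds() returns an unnamed list of row-index vectors, one per fold
    y <- label[rnd_idx]
    y <- as.factor(y)
    folds <- .stratified_folds(y = y, k = nfold)
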
Co-authored-by: José Morales <jmoralz92@gmail.com>
---
 R-package/R/lgb.cv.R | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/R-package/R/lgb.cv.R b/R-package/R/lgb.cv.R
index 70901a3ff2d2..11768c5bfa0b 100644
--- a/R-package/R/lgb.cv.R
+++ b/R-package/R/lgb.cv.R
@@ -387,7 +387,7 @@ lgb.cv <- function(params = list()
     })
 
     # Prepare collection of evaluation results
-    merged_msg <- .lgb_merge_cv_result(
+    merged_msg <- .merge_cv_result(
       msg = msg
       , showsd = showsd
     )
@@ -476,7 +476,7 @@ lgb.cv <- function(params = list()
 
       y <- label[rnd_idx]
       y <- as.factor(y)
-      folds <- .lgb_stratified_folds(y = y, k = nfold)
+      folds <- .stratified_folds(y = y, k = nfold)
 
     } else {
 
@@ -528,7 +528,7 @@ lgb.cv <- function(params = list()
 # It was borrowed from caret::createFolds and simplified
 # by always returning an unnamed list of fold indices.
 #' @importFrom stats quantile
-.lgb_stratified_folds <- function(y, k) {
+.stratified_folds <- function(y, k) {
 
   # Group the numeric data based on their magnitudes
   # and sample within those groups.
@@ -594,7 +594,7 @@ lgb.cv <- function(params = list()
   return(out)
 }
 
-.lgb_merge_cv_result <- function(msg, showsd) {
+.merge_cv_result <- function(msg, showsd) {
 
   if (length(msg) == 0L) {
     stop("lgb.cv: size of cv result error")