Merge branch 'master' into macos-version
jameslamb authored Dec 11, 2024
2 parents 9a83af6 + 186c7cd commit 43db19b
Showing 77 changed files with 663 additions and 532 deletions.
2 changes: 1 addition & 1 deletion .ci/conda-envs/ci-core.txt
@@ -18,7 +18,7 @@

# direct imports
cffi>=1.16
dask>=2023.5.0
dask>=2023.5.0,<2024.12
joblib>=1.3.2
matplotlib-base>=3.7.3
numpy>=1.24.4
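
For context, ci-core.txt is a plain conda requirements file, so the dask ceiling added above applies wherever that file is consumed. A rough sketch of consuming it directly (the environment name here is made up, and the real CI invocation may differ):

# create a test environment from the pinned requirements file
conda create --name lightgbm-test --file ./.ci/conda-envs/ci-core.txt
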
79 changes: 0 additions & 79 deletions .ci/install-old-r-packages.R

This file was deleted.

2 changes: 1 addition & 1 deletion .ci/test-r-package-windows.ps1
@@ -171,7 +171,7 @@ Write-Output "Done installing Rtools"
Write-Output "Installing CMake"
Add-Type -AssemblyName System.IO.Compression.FileSystem
[System.IO.Compression.ZipFile]::ExtractToDirectory("$env:CMAKE_PATH/cmake.zip", "$env:CMAKE_PATH") ; Assert-Output $?
# Remove old CMake shiped with RTools
# Remove old CMake shipped with RTools
Remove-Item "$env:RTOOLS_MINGW_BIN/cmake.exe" -Force -ErrorAction Ignore
Write-Output "Done installing CMake"

21 changes: 5 additions & 16 deletions .ci/test-r-package.sh
@@ -20,12 +20,7 @@ fi

# Get details needed for installing R components
R_MAJOR_VERSION="${R_VERSION%.*}"
if [[ "${R_MAJOR_VERSION}" == "3" ]]; then
export R_MAC_VERSION=3.6.3
export R_MAC_PKG_URL=${CRAN_MIRROR}/bin/macosx/R-${R_MAC_VERSION}.nn.pkg
export R_LINUX_VERSION="3.6.3-1bionic"
export R_APT_REPO="bionic-cran35/"
elif [[ "${R_MAJOR_VERSION}" == "4" ]]; then
if [[ "${R_MAJOR_VERSION}" == "4" ]]; then
export R_MAC_VERSION=4.3.1
export R_MAC_PKG_URL=${CRAN_MIRROR}/bin/macosx/big-sur-${ARCH}/base/R-${R_MAC_VERSION}-${ARCH}.pkg
export R_LINUX_VERSION="4.3.1-1.2204.0"
@@ -108,16 +103,10 @@ if [[ $OS_NAME == "macos" ]]; then
export R_TIDYCMD=/usr/local/bin/tidy
fi

# fix for issue where CRAN was not returning {evaluate}, {lattice}, or {waldo} when using R 3.6
# "Warning: dependency ‘lattice’ is not available"
if [[ "${R_MAJOR_VERSION}" == "3" ]]; then
Rscript --vanilla ./.ci/install-old-r-packages.R
else
# {Matrix} needs {lattice}, so this needs to run before manually installing {Matrix}.
# This should be unnecessary on R >=4.4.0
# ref: https://github.com/microsoft/LightGBM/issues/6433
Rscript --vanilla -e "install.packages('lattice', repos = '${CRAN_MIRROR}', lib = '${R_LIB_PATH}')"
fi
# {Matrix} needs {lattice}, so this needs to run before manually installing {Matrix}.
# This should be unnecessary on R >=4.4.0
# ref: https://github.com/microsoft/LightGBM/issues/6433
Rscript --vanilla -e "install.packages('lattice', repos = '${CRAN_MIRROR}', lib = '${R_LIB_PATH}')"

# manually install {Matrix}, as {Matrix}=1.7-0 raised its R floor all the way to R 4.4.0
# ref: https://github.com/microsoft/LightGBM/issues/6433
2 changes: 1 addition & 1 deletion .github/workflows/lock.yml
@@ -39,7 +39,7 @@ jobs:
This pull request has been automatically locked since there has not been any recent activity since it was closed.
To start a new related discussion, open a new issue at https://github.com/microsoft/LightGBM/issues
including a reference to this.
# what shoulld the locking status be?
# what should the locking status be?
issue-lock-reason: 'resolved'
pr-lock-reason: 'resolved'
process-only: 'issues, prs'
19 changes: 1 addition & 18 deletions .github/workflows/r_package.yml
@@ -14,10 +14,6 @@ concurrency:
cancel-in-progress: true

env:
# https://github.com/actions/checkout/issues/1590#issuecomment-2207052044
#
# this could be removed (hopefully) when R 3.6 support is removed
ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
# in CMake-driven builds, parallelize compilation
CMAKE_BUILD_PARALLEL_LEVEL: 4
# on Debian-based images, avoid interactive prompts
@@ -48,12 +44,6 @@ jobs:
################
# CMake builds #
################
- os: ubuntu-latest
task: r-package
compiler: gcc
r_version: 3.6
build_type: cmake
container: 'ubuntu:18.04'
- os: ubuntu-latest
task: r-package
compiler: gcc
@@ -174,19 +164,12 @@ jobs:
run: |
git config --global --add safe.directory "${GITHUB_WORKSPACE}"
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 5
submodules: true
- name: Install pandoc
uses: r-lib/actions/setup-pandoc@v2
if: matrix.container != 'ubuntu:18.04'
# R 3.6 binary isn't easily available on Ubuntu 18.04,
# but setup-pandoc>=2.7.1 is uses a too-new glibc for it.
# ref: https://github.com/microsoft/LightGBM/issues/6298
- name: Install pandoc
uses: r-lib/actions/[email protected]
if: matrix.container == 'ubuntu:18.04'
- name: Install tinytex
if: startsWith(matrix.os, 'windows')
uses: r-lib/actions/setup-tinytex@v2
8 changes: 7 additions & 1 deletion .pre-commit-config.yaml
@@ -38,4 +38,10 @@ repos:
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.10.0.1
hooks:
- id: shellcheck
- id: shellcheck
- repo: https://github.com/crate-ci/typos
rev: v1.23.2
hooks:
- id: typos
args: ["--force-exclude"]
exclude: (\.gitignore$)|(^\.editorconfig$)
21 changes: 21 additions & 0 deletions .typos.toml
@@ -0,0 +1,21 @@
default.extend-ignore-re = [
"/Ot",
"mis-alignment",
"mis-spelled",
"posix-seh-rt",
]

[default.extend-words]
MAPE = "MAPE"
datas = "datas"
interprete = "interprete"
mape = "mape"
splitted = "splitted"

[default.extend-identifiers]
ERRORs = "ERRORs"
GAM = "GAM"
ND24s = "ND24s"
WARNINGs = "WARNINGs"
fullset = "fullset"
thess = "thess"
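
The two hunks above add a typos spell-check hook to pre-commit along with its configuration; in .typos.toml, entries that map a word to itself (e.g. mape = "mape") tell typos to accept that spelling. A minimal sketch of running the new hook locally, assuming pre-commit is installed:

# run only the typos hook across the whole repository
pre-commit run typos --all-files

# or install the git hooks so they run automatically on every commit
pre-commit install
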
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -5,7 +5,7 @@ option(USE_SWIG "Enable SWIG to generate Java API" OFF)
option(USE_TIMETAG "Set to ON to output time costs" OFF)
option(USE_CUDA "Enable CUDA-accelerated training " OFF)
option(USE_DEBUG "Set to ON for Debug mode" OFF)
option(USE_SANITIZER "Use santizer flags" OFF)
option(USE_SANITIZER "Use sanitizer flags" OFF)
set(
ENABLED_SANITIZERS
"address" "leak" "undefined"
4 changes: 2 additions & 2 deletions R-package/R/lgb.Booster.R
@@ -1114,7 +1114,7 @@ predict.lgb.Booster <- function(object,
#'
#' Requesting a different prediction type or passing parameters to \link{predict.lgb.Booster}
#' will cause it to ignore the fast-predict configuration and take the slow route instead
#' (but be aware that an existing configuration might not always be overriden by supplying
#' (but be aware that an existing configuration might not always be overridden by supplying
#' different parameters or prediction type, so make sure to check that the output is what
#' was expected when a prediction is to be made on a single row for something different than
#' what is configured).
@@ -1128,7 +1128,7 @@ predict.lgb.Booster <- function(object,
#' and as such, this function will produce an error if passing \code{csr=TRUE} and
#' \code{type = "contrib"} together.
#' @inheritParams lgb_predict_shared_params
#' @param model LighGBM model object (class \code{lgb.Booster}).
#' @param model LightGBM model object (class \code{lgb.Booster}).
#'
#' \bold{The object will be modified in-place}.
#' @param csr Whether the prediction function is going to be called on sparse CSR inputs.
2 changes: 1 addition & 1 deletion R-package/R/lgb.importance.R
@@ -9,7 +9,7 @@
#' \item{\code{Feature}: Feature names in the model.}
#' \item{\code{Gain}: The total gain of this feature's splits.}
#' \item{\code{Cover}: The number of observation related to this feature.}
#' \item{\code{Frequency}: The number of times a feature splited in trees.}
#' \item{\code{Frequency}: The number of times a feature split in trees.}
#' }
#'
#' @examples
2 changes: 1 addition & 1 deletion R-package/R/lgb.model.dt.tree.R
@@ -10,7 +10,7 @@
#' \emph{New in version 4.4.0}
#'
#' @return
#' A \code{data.table} with detailed information about model trees' nodes and leafs.
#' A \code{data.table} with detailed information about model trees' nodes and leaves.
#'
#' The columns of the \code{data.table} are:
#'
2 changes: 1 addition & 1 deletion R-package/R/lightgbm.R
@@ -139,7 +139,7 @@ NULL
#' system, but be aware that getting the number of cores detected correctly requires package
#' \code{RhpcBLASctl} to be installed.
#'
#' This parameter gets overriden by \code{num_threads} and its aliases under \code{params}
#' This parameter gets overridden by \code{num_threads} and its aliases under \code{params}
#' if passed there.
#'
#' \emph{New in version 4.0.0}
2 changes: 1 addition & 1 deletion R-package/demo/cross_validation.R
@@ -51,7 +51,7 @@ logregobj <- function(preds, dtrain) {

# User-defined evaluation function returns a pair (metric_name, result, higher_better)
# NOTE: when you do customized loss function, the default prediction value is margin
# This may make built-in evalution metric calculate wrong results
# This may make built-in evaluation metric calculate wrong results
# For example, we are doing logistic loss, the prediction is score before logistic transformation
# Keep this in mind when you use the customization, and maybe you need write customized evaluation function
evalerror <- function(preds, dtrain) {
2 changes: 1 addition & 1 deletion R-package/demo/early_stopping.R
@@ -29,7 +29,7 @@ logregobj <- function(preds, dtrain) {

# User-defined evaluation function returns a pair (metric_name, result, higher_better)
# NOTE: when you do customized loss function, the default prediction value is margin
# This may make built-in evalution metric calculate wrong results
# This may make built-in evaluation metric calculate wrong results
# For example, we are doing logistic loss, the prediction is score before logistic transformation
# The built-in evaluation error assumes input is after logistic transformation
# Keep this in mind when you use the customization, and maybe you need write customized evaluation function
4 changes: 2 additions & 2 deletions R-package/man/lgb.configure_fast_predict.Rd

Generated file; diff not rendered by default.

2 changes: 1 addition & 1 deletion R-package/man/lgb.importance.Rd

Generated file; diff not rendered by default.

2 changes: 1 addition & 1 deletion R-package/man/lgb.model.dt.tree.Rd

Generated file; diff not rendered by default.

2 changes: 1 addition & 1 deletion R-package/man/lightgbm.Rd

Generated file; diff not rendered by default.

4 changes: 2 additions & 2 deletions R-package/tests/testthat/test_basic.R
@@ -9,7 +9,7 @@ set.seed(708L)
# to an accumulator then returns the current value.
# This is used to mock the situation where an evaluation
# metric increases every iteration
ACCUMULATOR_NAME <- "INCREASING_METRIC_ACUMULATOR"
ACCUMULATOR_NAME <- "INCREASING_METRIC_ACCUMULATOR"
assign(x = ACCUMULATOR_NAME, value = 0.0, envir = .GlobalEnv)

.increasing_metric <- function(preds, dtrain) {
@@ -1777,7 +1777,7 @@ test_that("lgb.train() works with early stopping for regression with a metric th
, early_stopping_rounds + 1L
)

# Booster should understand thatt all three of these metrics should be minimized
# Booster should understand that all three of these metrics should be minimized
eval_info <- bst$.__enclos_env__$private$get_eval_info()
expect_identical(eval_info, c("mape", "rmse", "l1"))
expect_identical(
2 changes: 1 addition & 1 deletion R-package/tests/testthat/test_custom_objective.R
@@ -14,7 +14,7 @@ logregobj <- function(preds, dtrain) {

# User-defined evaluation function returns a pair (metric_name, result, higher_better)
# NOTE: when you do customized loss function, the default prediction value is margin
# This may make built-in evalution metric calculate wrong results
# This may make built-in evaluation metric calculate wrong results
# Keep this in mind when you use the customization, and maybe you need write customized evaluation function
evalerror <- function(preds, dtrain) {
labels <- get_field(dtrain, "label")
2 changes: 1 addition & 1 deletion R-package/tests/testthat/test_lgb.interprete.R
@@ -5,7 +5,7 @@
log(x / (1.0 - x))
}

test_that("lgb.intereprete works as expected for binary classification", {
test_that("lgb.interprete works as expected for binary classification", {
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
dtrain <- lgb.Dataset(train$data, label = train$label)
4 changes: 2 additions & 2 deletions R-package/tests/testthat/test_lgb.plot.interpretation.R
@@ -5,7 +5,7 @@
log(x / (1.0 - x))
}

test_that("lgb.plot.interepretation works as expected for binary classification", {
test_that("lgb.plot.interpretation works as expected for binary classification", {
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
dtrain <- lgb.Dataset(train$data, label = train$label)
@@ -57,7 +57,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification"
expect_null(plot_res)
})

test_that("lgb.plot.interepretation works as expected for multiclass classification", {
test_that("lgb.plot.interpretation works as expected for multiclass classification", {
data(iris)

# We must convert factors to numeric
(Diffs for the remaining changed files are not shown.)
