diff --git a/.ci/test-r-package-windows.ps1 b/.ci/test-r-package-windows.ps1
index 1ce698a49c72..a3f524b60be7 100644
--- a/.ci/test-r-package-windows.ps1
+++ b/.ci/test-r-package-windows.ps1
@@ -171,7 +171,7 @@ Write-Output "Done installing Rtools"
Write-Output "Installing CMake"
Add-Type -AssemblyName System.IO.Compression.FileSystem
[System.IO.Compression.ZipFile]::ExtractToDirectory("$env:CMAKE_PATH/cmake.zip", "$env:CMAKE_PATH") ; Assert-Output $?
-# Remove old CMake shiped with RTools
+# Remove old CMake shipped with RTools
Remove-Item "$env:RTOOLS_MINGW_BIN/cmake.exe" -Force -ErrorAction Ignore
Write-Output "Done installing CMake"
diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml
index 4efe658b7f45..195fd5f1c8f1 100644
--- a/.github/workflows/lock.yml
+++ b/.github/workflows/lock.yml
@@ -39,7 +39,7 @@ jobs:
This pull request has been automatically locked since there has not been any recent activity since it was closed.
To start a new related discussion, open a new issue at https://github.com/microsoft/LightGBM/issues
including a reference to this.
- # what shoulld the locking status be?
+ # what should the locking status be?
issue-lock-reason: 'resolved'
pr-lock-reason: 'resolved'
process-only: 'issues, prs'
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 7e5e5dd8e9d9..b334db19b8e7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -38,4 +38,10 @@ repos:
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.10.0.1
hooks:
- - id: shellcheck
+ - id: shellcheck
+ - repo: https://github.com/crate-ci/typos
+ rev: v1.23.2
+ hooks:
+ - id: typos
+ args: ["--force-exclude"]
+ exclude: (\.gitignore$)|(^\.editorconfig$)
diff --git a/.typos.toml b/.typos.toml
new file mode 100644
index 000000000000..6dc2c2c97529
--- /dev/null
+++ b/.typos.toml
@@ -0,0 +1,21 @@
+default.extend-ignore-re = [
+ "/Ot",
+ "mis-alignment",
+ "mis-spelled",
+ "posix-seh-rt",
+]
+
+[default.extend-words]
+MAPE = "MAPE"
+datas = "datas"
+interprete = "interprete"
+mape = "mape"
+splitted = "splitted"
+
+[default.extend-identifiers]
+ERRORs = "ERRORs"
+GAM = "GAM"
+ND24s = "ND24s"
+WARNINGs = "WARNINGs"
+fullset = "fullset"
+thess = "thess"
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 183ef62bd68e..4f57cf9622e6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -5,7 +5,7 @@ option(USE_SWIG "Enable SWIG to generate Java API" OFF)
option(USE_TIMETAG "Set to ON to output time costs" OFF)
option(USE_CUDA "Enable CUDA-accelerated training " OFF)
option(USE_DEBUG "Set to ON for Debug mode" OFF)
-option(USE_SANITIZER "Use santizer flags" OFF)
+option(USE_SANITIZER "Use sanitizer flags" OFF)
set(
ENABLED_SANITIZERS
"address" "leak" "undefined"
diff --git a/R-package/R/lgb.Booster.R b/R-package/R/lgb.Booster.R
index a13516ff6569..85a91b1ce058 100644
--- a/R-package/R/lgb.Booster.R
+++ b/R-package/R/lgb.Booster.R
@@ -1114,7 +1114,7 @@ predict.lgb.Booster <- function(object,
#'
#' Requesting a different prediction type or passing parameters to \link{predict.lgb.Booster}
#' will cause it to ignore the fast-predict configuration and take the slow route instead
-#' (but be aware that an existing configuration might not always be overriden by supplying
+#' (but be aware that an existing configuration might not always be overridden by supplying
#' different parameters or prediction type, so make sure to check that the output is what
#' was expected when a prediction is to be made on a single row for something different than
#' what is configured).
@@ -1128,7 +1128,7 @@ predict.lgb.Booster <- function(object,
#' and as such, this function will produce an error if passing \code{csr=TRUE} and
#' \code{type = "contrib"} together.
#' @inheritParams lgb_predict_shared_params
-#' @param model LighGBM model object (class \code{lgb.Booster}).
+#' @param model LightGBM model object (class \code{lgb.Booster}).
#'
#' \bold{The object will be modified in-place}.
#' @param csr Whether the prediction function is going to be called on sparse CSR inputs.
diff --git a/R-package/R/lgb.importance.R b/R-package/R/lgb.importance.R
index 7c76131f4f53..d60507cf00d4 100644
--- a/R-package/R/lgb.importance.R
+++ b/R-package/R/lgb.importance.R
@@ -9,7 +9,7 @@
#' \item{\code{Feature}: Feature names in the model.}
#' \item{\code{Gain}: The total gain of this feature's splits.}
#' \item{\code{Cover}: The number of observation related to this feature.}
-#' \item{\code{Frequency}: The number of times a feature splited in trees.}
+#' \item{\code{Frequency}: The number of times a feature split in trees.}
#' }
#'
#' @examples
diff --git a/R-package/R/lgb.model.dt.tree.R b/R-package/R/lgb.model.dt.tree.R
index db4ef955f866..ac1b2f9aaf14 100644
--- a/R-package/R/lgb.model.dt.tree.R
+++ b/R-package/R/lgb.model.dt.tree.R
@@ -10,7 +10,7 @@
#' \emph{New in version 4.4.0}
#'
#' @return
-#' A \code{data.table} with detailed information about model trees' nodes and leafs.
+#' A \code{data.table} with detailed information about model trees' nodes and leaves.
#'
#' The columns of the \code{data.table} are:
#'
diff --git a/R-package/R/lightgbm.R b/R-package/R/lightgbm.R
index efa593ffe12f..6cb4eebd8baf 100644
--- a/R-package/R/lightgbm.R
+++ b/R-package/R/lightgbm.R
@@ -139,7 +139,7 @@ NULL
#' system, but be aware that getting the number of cores detected correctly requires package
#' \code{RhpcBLASctl} to be installed.
#'
-#' This parameter gets overriden by \code{num_threads} and its aliases under \code{params}
+#' This parameter gets overridden by \code{num_threads} and its aliases under \code{params}
#' if passed there.
#'
#' \emph{New in version 4.0.0}
diff --git a/R-package/demo/cross_validation.R b/R-package/demo/cross_validation.R
index 0324f83f2da9..9f74ef7f4b2a 100644
--- a/R-package/demo/cross_validation.R
+++ b/R-package/demo/cross_validation.R
@@ -51,7 +51,7 @@ logregobj <- function(preds, dtrain) {
# User-defined evaluation function returns a pair (metric_name, result, higher_better)
# NOTE: when you do customized loss function, the default prediction value is margin
-# This may make built-in evalution metric calculate wrong results
+# This may make built-in evaluation metric calculate wrong results
# For example, we are doing logistic loss, the prediction is score before logistic transformation
# Keep this in mind when you use the customization, and maybe you need write customized evaluation function
evalerror <- function(preds, dtrain) {
diff --git a/R-package/demo/early_stopping.R b/R-package/demo/early_stopping.R
index 6ca214c5ac7b..4435dd1b09b6 100644
--- a/R-package/demo/early_stopping.R
+++ b/R-package/demo/early_stopping.R
@@ -29,7 +29,7 @@ logregobj <- function(preds, dtrain) {
# User-defined evaluation function returns a pair (metric_name, result, higher_better)
# NOTE: when you do customized loss function, the default prediction value is margin
-# This may make built-in evalution metric calculate wrong results
+# This may make built-in evaluation metric calculate wrong results
# For example, we are doing logistic loss, the prediction is score before logistic transformation
# The built-in evaluation error assumes input is after logistic transformation
# Keep this in mind when you use the customization, and maybe you need write customized evaluation function
diff --git a/R-package/man/lgb.configure_fast_predict.Rd b/R-package/man/lgb.configure_fast_predict.Rd
index e02600451df5..9cd4339bdced 100644
--- a/R-package/man/lgb.configure_fast_predict.Rd
+++ b/R-package/man/lgb.configure_fast_predict.Rd
@@ -14,7 +14,7 @@ lgb.configure_fast_predict(
)
}
\arguments{
-\item{model}{LighGBM model object (class \code{lgb.Booster}).
+\item{model}{LightGBM model object (class \code{lgb.Booster}).
\bold{The object will be modified in-place}.}
@@ -98,7 +98,7 @@ Calling this function multiple times with different parameters might not overrid
Requesting a different prediction type or passing parameters to \link{predict.lgb.Booster}
will cause it to ignore the fast-predict configuration and take the slow route instead
- (but be aware that an existing configuration might not always be overriden by supplying
+ (but be aware that an existing configuration might not always be overridden by supplying
different parameters or prediction type, so make sure to check that the output is what
was expected when a prediction is to be made on a single row for something different than
what is configured).
diff --git a/R-package/man/lgb.importance.Rd b/R-package/man/lgb.importance.Rd
index 79cb82f5d8ef..5099643112be 100644
--- a/R-package/man/lgb.importance.Rd
+++ b/R-package/man/lgb.importance.Rd
@@ -17,7 +17,7 @@ For a tree model, a \code{data.table} with the following columns:
\item{\code{Feature}: Feature names in the model.}
\item{\code{Gain}: The total gain of this feature's splits.}
\item{\code{Cover}: The number of observation related to this feature.}
- \item{\code{Frequency}: The number of times a feature splited in trees.}
+ \item{\code{Frequency}: The number of times a feature split in trees.}
}
}
\description{
diff --git a/R-package/man/lgb.model.dt.tree.Rd b/R-package/man/lgb.model.dt.tree.Rd
index ecfee17332f5..df36b6a94f42 100644
--- a/R-package/man/lgb.model.dt.tree.Rd
+++ b/R-package/man/lgb.model.dt.tree.Rd
@@ -18,7 +18,7 @@ lgb.model.dt.tree(model, num_iteration = NULL, start_iteration = 1L)
\emph{New in version 4.4.0}}
}
\value{
-A \code{data.table} with detailed information about model trees' nodes and leafs.
+A \code{data.table} with detailed information about model trees' nodes and leaves.
The columns of the \code{data.table} are:
diff --git a/R-package/man/lightgbm.Rd b/R-package/man/lightgbm.Rd
index 90cb3166bf5c..376a6d03a6b1 100644
--- a/R-package/man/lightgbm.Rd
+++ b/R-package/man/lightgbm.Rd
@@ -93,7 +93,7 @@ set to the iteration number of the best iteration.}
system, but be aware that getting the number of cores detected correctly requires package
\code{RhpcBLASctl} to be installed.
- This parameter gets overriden by \code{num_threads} and its aliases under \code{params}
+ This parameter gets overridden by \code{num_threads} and its aliases under \code{params}
if passed there.
\emph{New in version 4.0.0}}
diff --git a/R-package/tests/testthat/test_basic.R b/R-package/tests/testthat/test_basic.R
index c734816b4038..7310815c4a6d 100644
--- a/R-package/tests/testthat/test_basic.R
+++ b/R-package/tests/testthat/test_basic.R
@@ -9,7 +9,7 @@ set.seed(708L)
# to an accumulator then returns the current value.
# This is used to mock the situation where an evaluation
# metric increases every iteration
-ACCUMULATOR_NAME <- "INCREASING_METRIC_ACUMULATOR"
+ACCUMULATOR_NAME <- "INCREASING_METRIC_ACCUMULATOR"
assign(x = ACCUMULATOR_NAME, value = 0.0, envir = .GlobalEnv)
.increasing_metric <- function(preds, dtrain) {
@@ -1777,7 +1777,7 @@ test_that("lgb.train() works with early stopping for regression with a metric th
, early_stopping_rounds + 1L
)
- # Booster should understand thatt all three of these metrics should be minimized
+ # Booster should understand that all three of these metrics should be minimized
eval_info <- bst$.__enclos_env__$private$get_eval_info()
expect_identical(eval_info, c("mape", "rmse", "l1"))
expect_identical(
diff --git a/R-package/tests/testthat/test_custom_objective.R b/R-package/tests/testthat/test_custom_objective.R
index 2c10b9d571dc..a1baf0067c4a 100644
--- a/R-package/tests/testthat/test_custom_objective.R
+++ b/R-package/tests/testthat/test_custom_objective.R
@@ -14,7 +14,7 @@ logregobj <- function(preds, dtrain) {
# User-defined evaluation function returns a pair (metric_name, result, higher_better)
# NOTE: when you do customized loss function, the default prediction value is margin
-# This may make built-in evalution metric calculate wrong results
+# This may make built-in evaluation metric calculate wrong results
# Keep this in mind when you use the customization, and maybe you need write customized evaluation function
evalerror <- function(preds, dtrain) {
labels <- get_field(dtrain, "label")
diff --git a/R-package/tests/testthat/test_lgb.interprete.R b/R-package/tests/testthat/test_lgb.interprete.R
index 322a80a55bc5..cfcd1c942f31 100644
--- a/R-package/tests/testthat/test_lgb.interprete.R
+++ b/R-package/tests/testthat/test_lgb.interprete.R
@@ -5,7 +5,7 @@
log(x / (1.0 - x))
}
-test_that("lgb.intereprete works as expected for binary classification", {
+test_that("lgb.interprete works as expected for binary classification", {
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
dtrain <- lgb.Dataset(train$data, label = train$label)
diff --git a/R-package/tests/testthat/test_lgb.plot.interpretation.R b/R-package/tests/testthat/test_lgb.plot.interpretation.R
index 6cba9927942a..e8a021fc7237 100644
--- a/R-package/tests/testthat/test_lgb.plot.interpretation.R
+++ b/R-package/tests/testthat/test_lgb.plot.interpretation.R
@@ -5,7 +5,7 @@
log(x / (1.0 - x))
}
-test_that("lgb.plot.interepretation works as expected for binary classification", {
+test_that("lgb.plot.interpretation works as expected for binary classification", {
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
dtrain <- lgb.Dataset(train$data, label = train$label)
@@ -57,7 +57,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification"
expect_null(plot_res)
})
-test_that("lgb.plot.interepretation works as expected for multiclass classification", {
+test_that("lgb.plot.interpretation works as expected for multiclass classification", {
data(iris)
# We must convert factors to numeric
diff --git a/cmake/Sanitizer.cmake b/cmake/Sanitizer.cmake
index a3768effac0d..f99048476d8b 100644
--- a/cmake/Sanitizer.cmake
+++ b/cmake/Sanitizer.cmake
@@ -18,7 +18,7 @@ macro(enable_sanitizer sanitizer)
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=undefined -fno-sanitize-recover=undefined")
else()
- message(FATAL_ERROR "Santizer ${sanitizer} not supported.")
+ message(FATAL_ERROR "Sanitizer ${sanitizer} not supported.")
endif()
endmacro()
diff --git a/docker/README.md b/docker/README.md
index dfedc2f4e3f1..e68346545ccf 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -55,7 +55,7 @@ After this runs, a LightGBM model can be found at `LightGBM-CLI-model.txt`.
For more details on how to configure and use the LightGBM CLI, see https://lightgbm.readthedocs.io/en/latest/Quick-Start.html.
-## Running the Python-package Сontainer
+## Running the Python-package Container
Build an image with the LightGBM Python-package installed.
@@ -114,7 +114,7 @@ docker run \
python
```
-## Running the R-package Сontainer
+## Running the R-package Container
Build an image with the LightGBM R-package installed.
diff --git a/docs/Installation-Guide.rst b/docs/Installation-Guide.rst
index 41b84f9b82c2..1e28d037388d 100644
--- a/docs/Installation-Guide.rst
+++ b/docs/Installation-Guide.rst
@@ -1,17 +1,30 @@
Installation Guide
==================
-This is a guide for building the LightGBM Command Line Interface (CLI). If you want to build the Python-package or R-package please refer to `Python-package`_ and `R-package`_ folders respectively.
-
All instructions below are aimed at compiling the 64-bit version of LightGBM.
It is worth compiling the 32-bit version only in very rare special cases involving environmental limitations.
The 32-bit version is slow and untested, so use it at your own risk and don't forget to adjust some of the commands below when installing.
+By default, instructions below will use **VS Build Tools** or **make** tool to compile the code.
+It is possible to use `Ninja`_ tool instead of make on all platforms, but VS Build Tools cannot be replaced with Ninja.
+You can add ``-G Ninja`` to CMake flags to use Ninja.
+
+By default, instructions below will produce a shared library file and an executable file with command-line interface.
+You can add ``-DBUILD_CLI=OFF`` to CMake flags to disable the executable compilation.
+
If you need to build a static library instead of a shared one, you can add ``-DBUILD_STATIC_LIB=ON`` to CMake flags.
+By default, instructions below will place header files into a system-wide folder.
+You can add ``-DINSTALL_HEADERS=OFF`` to CMake flags to disable headers installation.
+
+By default, on macOS, CMake looks into Homebrew standard folders to find dependencies (e.g. OpenMP).
+You can add ``-DUSE_HOMEBREW_FALLBACK=OFF`` to CMake flags to disable this behaviour.
+
Users who want to perform benchmarking can make LightGBM output time costs for different internal routines by adding ``-DUSE_TIMETAG=ON`` to CMake flags.
-It is possible to build LightGBM in debug mode. In this mode all compiler optimizations are disabled and LightGBM performs more checks internally. To enable debug mode you can add ``-DUSE_DEBUG=ON`` to CMake flags or choose ``Debug_*`` configuration (e.g. ``Debug_DLL``, ``Debug_mpi``) in Visual Studio depending on how you are building LightGBM.
+It is possible to build LightGBM in debug mode.
+In this mode all compiler optimizations are disabled and LightGBM performs more checks internally.
+To enable debug mode you can add ``-DUSE_DEBUG=ON`` to CMake flags or choose ``Debug_*`` configuration (e.g. ``Debug_DLL``, ``Debug_mpi``) in Visual Studio depending on how you are building LightGBM.
.. _sanitizers:
@@ -30,7 +43,7 @@ It is very useful to build `C++ unit tests <#build-c-unit-tests>`__ with sanitiz
.. _nightly-builds:
-You can also download the artifacts of the latest successful build on master branch (nightly builds) here: |download artifacts|.
+You can download the artifacts of the latest successful build on master branch (nightly builds) here: |download artifacts|.
.. contents:: **Contents**
:depth: 1
@@ -40,12 +53,10 @@ You can also download the artifacts of the latest successful build on master bra
Windows
~~~~~~~
-On Windows LightGBM can be built using
+On Windows, LightGBM can be built using
- **Visual Studio**;
-
- **CMake** and **VS Build Tools**;
-
- **CMake** and **MinGW**.
Visual Studio (or VS Build Tools)
@@ -54,22 +65,23 @@ Visual Studio (or VS Build Tools)
With GUI
********
-1. Install `Visual Studio`_ (2015 or newer).
+1. Install `Visual Studio`_.
2. Navigate to one of the releases at https://github.com/microsoft/LightGBM/releases, download ``LightGBM-complete_source_code_zip.zip``, and unzip it.
-3. Go to ``LightGBM-master/windows`` folder.
+3. Go to ``LightGBM-complete_source_code_zip/windows`` folder.
-4. Open ``LightGBM.sln`` file with **Visual Studio**, choose ``Release`` configuration and click ``BUILD`` -> ``Build Solution (Ctrl+Shift+B)``.
+4. Open ``LightGBM.sln`` file with **Visual Studio**, choose ``Release`` configuration if you need executable file or ``DLL`` configuration if you need shared library and click ``Build`` -> ``Build Solution (Ctrl+Shift+B)``.
- If you have errors about **Platform Toolset**, go to ``PROJECT`` -> ``Properties`` -> ``Configuration Properties`` -> ``General`` and select the toolset installed on your machine.
+ If you have errors about **Platform Toolset**, go to ``Project`` -> ``Properties`` -> ``Configuration Properties`` -> ``General`` and select the toolset installed on your machine.
-The ``.exe`` file will be in ``LightGBM-master/windows/x64/Release`` folder.
+The ``.exe`` file will be in ``LightGBM-complete_source_code_zip/windows/x64/Release`` folder.
+The ``.dll`` file will be in ``LightGBM-complete_source_code_zip/windows/x64/DLL`` folder.
From Command Line
*****************
-1. Install `Git for Windows`_, `CMake`_ and `VS Build Tools`_ (**VS Build Tools** is not needed if **Visual Studio** (2015 or newer) is already installed).
+1. Install `Git for Windows`_, `CMake`_ and `VS Build Tools`_ (**VS Build Tools** is not needed if **Visual Studio** is already installed).
2. Run the following commands:
@@ -98,7 +110,7 @@ MinGW-w64
The ``.exe`` and ``.dll`` files will be in ``LightGBM/`` folder.
-**Note**: You may need to run the ``cmake -B build -S . -G "MinGW Makefiles"`` one more time if you encounter the ``sh.exe was found in your PATH`` error.
+**Note**: You may need to run the ``cmake -B build -S . -G "MinGW Makefiles"`` one more time or add ``-DCMAKE_SH=CMAKE_SH-NOTFOUND`` to CMake flags if you encounter the ``sh.exe was found in your PATH`` error.
It is recommended that you use **Visual Studio** since it has better multithreading efficiency in **Windows** for many-core systems
(see `Question 4 <./FAQ.rst#i-am-using-windows-should-i-use-visual-studio-or-mingw-for-compiling-lightgbm>`__ and `Question 8 <./FAQ.rst#cpu-usage-is-low-like-10-in-windows-when-using-lightgbm-on-very-large-datasets-with-many-core-systems>`__).
@@ -106,9 +118,17 @@ It is recommended that you use **Visual Studio** since it has better multithread
Linux
~~~~~
-On Linux LightGBM can be built using **CMake** and **gcc** or **Clang**.
+On Linux, LightGBM can be built using
+
+- **CMake** and **gcc**;
+- **CMake** and **Clang**.
+
+After compilation the executable and ``.so`` files will be in ``LightGBM/`` folder.
-1. Install `CMake`_.
+gcc
+^^^
+
+1. Install `CMake`_ and **gcc**.
2. Run the following commands:
@@ -119,53 +139,69 @@ On Linux LightGBM can be built using **CMake** and **gcc** or **Clang**.
cmake -B build -S .
cmake --build build -j4
-**Note**: In some rare cases you may need to install OpenMP runtime library separately (use your package manager and search for ``lib[g|i]omp`` for doing this).
+Clang
+^^^^^
-Using ``Ninja``
-^^^^^^^^^^^^^^^
+1. Install `CMake`_, **Clang** and **OpenMP**.
-On Linux, LightGBM can also be built with `Ninja `__ instead of ``make``.
+2. Run the following commands:
-.. code:: sh
+ .. code:: sh
git clone --recursive https://github.com/microsoft/LightGBM
cd LightGBM
- cmake -B build -S . -G 'Ninja'
- cmake --build build -j2
+ export CXX=clang++-14 CC=clang-14 # replace "14" with version of Clang installed on your machine
+ cmake -B build -S .
+ cmake --build build -j4
macOS
~~~~~
-On macOS LightGBM can be installed using **Homebrew**, or can be built using **CMake** and **Apple Clang** or **gcc**.
+On macOS, LightGBM can be installed using
-Apple Clang
-^^^^^^^^^^^
+- **Homebrew**;
+- **MacPorts**;
+
+or can be built using
-Only **Apple Clang** version 8.1 or higher is supported.
+- **CMake** and **Apple Clang**;
+- **CMake** and **gcc**.
Install Using ``Homebrew``
-**************************
+^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: sh
brew install lightgbm
-Build from GitHub
-*****************
+Refer to https://formulae.brew.sh/formula/lightgbm for more details.
-1. Install `CMake`_ :
+Install Using ``MacPorts``
+^^^^^^^^^^^^^^^^^^^^^^^^^^
- .. code:: sh
+.. code:: sh
- brew install cmake
+ sudo port install LightGBM
+
+Refer to https://ports.macports.org/port/LightGBM for more details.
+
+**Note**: Port for LightGBM is not maintained by LightGBM's maintainers.
-2. Install **OpenMP**:
+Build from GitHub
+^^^^^^^^^^^^^^^^^
+
+After compilation the executable and ``.dylib`` files will be in ``LightGBM/`` folder.
+
+Apple Clang
+***********
+
+1. Install `CMake`_ and **OpenMP**:
.. code:: sh
- brew install libomp
+ brew install cmake libomp
-3. Run the following commands:
+2. Run the following commands:
.. code:: sh
@@ -175,21 +211,15 @@ Build from GitHub
cmake --build build -j4
gcc
-^^^
-
-1. Install `CMake`_ :
-
- .. code:: sh
-
- brew install cmake
+***
-2. Install **gcc**:
+1. Install `CMake`_ and **gcc**:
.. code:: sh
- brew install gcc
+ brew install cmake gcc
-3. Run the following commands:
+2. Run the following commands:
.. code:: sh
@@ -213,12 +243,10 @@ You can build LightGBM without OpenMP support but it is **strongly not recommend
Windows
^^^^^^^
-On Windows a version of LightGBM without OpenMP support can be built using
+On Windows, a version of LightGBM without OpenMP support can be built using
- **Visual Studio**;
-
- **CMake** and **VS Build Tools**;
-
- **CMake** and **MinGW**.
Visual Studio (or VS Build Tools)
@@ -227,26 +255,27 @@ Visual Studio (or VS Build Tools)
With GUI
--------
-1. Install `Visual Studio`_ (2015 or newer).
+1. Install `Visual Studio`_.
2. Navigate to one of the releases at https://github.com/microsoft/LightGBM/releases, download ``LightGBM-complete_source_code_zip.zip``, and unzip it.
-3. Go to ``LightGBM-master/windows`` folder.
+3. Go to ``LightGBM-complete_source_code_zip/windows`` folder.
-4. Open ``LightGBM.sln`` file with **Visual Studio**.
+4. Open ``LightGBM.sln`` file with **Visual Studio**, choose ``Release`` configuration if you need executable file or ``DLL`` configuration if you need shared library.
-5. Go to ``PROJECT`` -> ``Properties`` -> ``Configuration Properties`` -> ``C/C++`` -> ``Language`` and change the ``OpenMP Support`` property to ``No (/openmp-)``.
+5. Go to ``Project`` -> ``Properties`` -> ``Configuration Properties`` -> ``C/C++`` -> ``Language`` and change the ``OpenMP Support`` property to ``No (/openmp-)``.
-6. Get back to the project's main screen, then choose ``Release`` configuration and click ``BUILD`` -> ``Build Solution (Ctrl+Shift+B)``.
+6. Get back to the project's main screen and click ``Build`` -> ``Build Solution (Ctrl+Shift+B)``.
- If you have errors about **Platform Toolset**, go to ``PROJECT`` -> ``Properties`` -> ``Configuration Properties`` -> ``General`` and select the toolset installed on your machine.
+ If you have errors about **Platform Toolset**, go to ``Project`` -> ``Properties`` -> ``Configuration Properties`` -> ``General`` and select the toolset installed on your machine.
-The ``.exe`` file will be in ``LightGBM-master/windows/x64/Release`` folder.
+The ``.exe`` file will be in ``LightGBM-complete_source_code_zip/windows/x64/Release`` folder.
+The ``.dll`` file will be in ``LightGBM-complete_source_code_zip/windows/x64/DLL`` folder.
From Command Line
-----------------
-1. Install `Git for Windows`_, `CMake`_ and `VS Build Tools`_ (**VS Build Tools** is not needed if **Visual Studio** (2015 or newer) is already installed).
+1. Install `Git for Windows`_, `CMake`_ and `VS Build Tools`_ (**VS Build Tools** is not needed if **Visual Studio** is already installed).
2. Run the following commands:
@@ -275,14 +304,36 @@ MinGW-w64
The ``.exe`` and ``.dll`` files will be in ``LightGBM/`` folder.
-**Note**: You may need to run the ``cmake -B build -S . -G "MinGW Makefiles" -DUSE_OPENMP=OFF`` one more time if you encounter the ``sh.exe was found in your PATH`` error.
+**Note**: You may need to run the ``cmake -B build -S . -G "MinGW Makefiles" -DUSE_OPENMP=OFF`` one more time or add ``-DCMAKE_SH=CMAKE_SH-NOTFOUND`` to CMake flags if you encounter the ``sh.exe was found in your PATH`` error.
Linux
^^^^^
-On Linux a version of LightGBM without OpenMP support can be built using **CMake** and **gcc** or **Clang**.
+On Linux, a version of LightGBM without OpenMP support can be built using
+
+- **CMake** and **gcc**;
+- **CMake** and **Clang**.
+
+After compilation the executable and ``.so`` files will be in ``LightGBM/`` folder.
+
+gcc
+***
+
+1. Install `CMake`_ and **gcc**.
+
+2. Run the following commands:
+
+ .. code:: sh
-1. Install `CMake`_.
+ git clone --recursive https://github.com/microsoft/LightGBM
+ cd LightGBM
+ cmake -B build -S . -DUSE_OPENMP=OFF
+ cmake --build build -j4
+
+Clang
+*****
+
+1. Install `CMake`_ and **Clang**.
2. Run the following commands:
@@ -290,20 +341,24 @@ On Linux a version of LightGBM without OpenMP support can be built using **CMake
git clone --recursive https://github.com/microsoft/LightGBM
cd LightGBM
+ export CXX=clang++-14 CC=clang-14 # replace "14" with version of Clang installed on your machine
cmake -B build -S . -DUSE_OPENMP=OFF
cmake --build build -j4
macOS
^^^^^
-On macOS a version of LightGBM without OpenMP support can be built using **CMake** and **Apple Clang** or **gcc**.
+On macOS, a version of LightGBM without OpenMP support can be built using
+
+- **CMake** and **Apple Clang**;
+- **CMake** and **gcc**.
+
+After compilation the executable and ``.dylib`` files will be in ``LightGBM/`` folder.
Apple Clang
***********
-Only **Apple Clang** version 8.1 or higher is supported.
-
-1. Install `CMake`_ :
+1. Install `CMake`_:
.. code:: sh
@@ -321,19 +376,13 @@ Only **Apple Clang** version 8.1 or higher is supported.
gcc
***
-1. Install `CMake`_ :
+1. Install `CMake`_ and **gcc**:
.. code:: sh
- brew install cmake
+ brew install cmake gcc
-2. Install **gcc**:
-
- .. code:: sh
-
- brew install gcc
-
-3. Run the following commands:
+2. Run the following commands:
.. code:: sh
@@ -354,35 +403,36 @@ If you need to run a distributed learning application with high performance comm
Windows
^^^^^^^
-On Windows an MPI version of LightGBM can be built using
+On Windows, an MPI version of LightGBM can be built using
- **MS MPI** and **Visual Studio**;
-
- **MS MPI**, **CMake** and **VS Build Tools**.
+**Note**: Building MPI version by **MinGW** is not supported due to the lack of MPI library in it.
+
With GUI
********
1. You need to install `MS MPI`_ first. Both ``msmpisdk.msi`` and ``msmpisetup.exe`` are needed.
-2. Install `Visual Studio`_ (2015 or newer).
+2. Install `Visual Studio`_.
3. Navigate to one of the releases at https://github.com/microsoft/LightGBM/releases, download ``LightGBM-complete_source_code_zip.zip``, and unzip it.
-4. Go to ``LightGBM-master/windows`` folder.
+4. Go to ``LightGBM-complete_source_code_zip/windows`` folder.
-5. Open ``LightGBM.sln`` file with **Visual Studio**, choose ``Release_mpi`` configuration and click ``BUILD`` -> ``Build Solution (Ctrl+Shift+B)``.
+5. Open ``LightGBM.sln`` file with **Visual Studio**, choose ``Release_mpi`` configuration and click ``Build`` -> ``Build Solution (Ctrl+Shift+B)``.
- If you have errors about **Platform Toolset**, go to ``PROJECT`` -> ``Properties`` -> ``Configuration Properties`` -> ``General`` and select the toolset installed on your machine.
+ If you have errors about **Platform Toolset**, go to ``Project`` -> ``Properties`` -> ``Configuration Properties`` -> ``General`` and select the toolset installed on your machine.
-The ``.exe`` file will be in ``LightGBM-master/windows/x64/Release_mpi`` folder.
+The ``.exe`` file will be in ``LightGBM-complete_source_code_zip/windows/x64/Release_mpi`` folder.
From Command Line
*****************
1. You need to install `MS MPI`_ first. Both ``msmpisdk.msi`` and ``msmpisetup.exe`` are needed.
-2. Install `Git for Windows`_, `CMake`_ and `VS Build Tools`_ (**VS Build Tools** is not needed if **Visual Studio** (2015 or newer) is already installed).
+2. Install `Git for Windows`_, `CMake`_ and `VS Build Tools`_ (**VS Build Tools** is not needed if **Visual Studio** is already installed).
3. Run the following commands:
@@ -395,18 +445,22 @@ From Command Line
The ``.exe`` and ``.dll`` files will be in ``LightGBM/Release`` folder.
-**Note**: Building MPI version by **MinGW** is not supported due to the miss of MPI library in it.
-
Linux
^^^^^
-On Linux an MPI version of LightGBM can be built using **Open MPI**, **CMake** and **gcc** or **Clang**.
+On Linux, an MPI version of LightGBM can be built using
-1. Install `Open MPI`_.
+- **CMake**, **gcc** and **Open MPI**;
+- **CMake**, **Clang** and **Open MPI**.
-2. Install `CMake`_.
+After compilation the executable and ``.so`` files will be in ``LightGBM/`` folder.
-3. Run the following commands:
+gcc
+***
+
+1. Install `CMake`_, **gcc** and `Open MPI`_.
+
+2. Run the following commands:
.. code:: sh
@@ -415,37 +469,41 @@ On Linux an MPI version of LightGBM can be built using **Open MPI**, **CMake** a
cmake -B build -S . -DUSE_MPI=ON
cmake --build build -j4
-**Note**: In some rare cases you may need to install OpenMP runtime library separately (use your package manager and search for ``lib[g|i]omp`` for doing this).
-
-macOS
-^^^^^
+Clang
+*****
-On macOS an MPI version of LightGBM can be built using **Open MPI**, **CMake** and **Apple Clang** or **gcc**.
+1. Install `CMake`_, **Clang**, **OpenMP** and `Open MPI`_.
-Apple Clang
-***********
+2. Run the following commands:
-Only **Apple Clang** version 8.1 or higher is supported.
+ .. code:: sh
-1. Install `CMake`_ :
+ git clone --recursive https://github.com/microsoft/LightGBM
+ cd LightGBM
+ export CXX=clang++-14 CC=clang-14 # replace "14" with version of Clang installed on your machine
+ cmake -B build -S . -DUSE_MPI=ON
+ cmake --build build -j4
- .. code:: sh
+macOS
+^^^^^
- brew install cmake
+On macOS, an MPI version of LightGBM can be built using
-2. Install **OpenMP**:
+- **CMake**, **Open MPI** and **Apple Clang**;
+- **CMake**, **Open MPI** and **gcc**.
- .. code:: sh
+After compilation the executable and ``.dylib`` files will be in ``LightGBM/`` folder.
- brew install libomp
+Apple Clang
+***********
-3. Install **Open MPI**:
+1. Install `CMake`_, **OpenMP** and `Open MPI`_:
.. code:: sh
- brew install open-mpi
+ brew install cmake libomp open-mpi
-4. Run the following commands:
+2. Run the following commands:
.. code:: sh
@@ -457,25 +515,13 @@ Only **Apple Clang** version 8.1 or higher is supported.
gcc
***
-1. Install `CMake`_ :
-
- .. code:: sh
-
- brew install cmake
-
-2. Install **gcc**:
+1. Install `CMake`_, `Open MPI`_ and **gcc**:
.. code:: sh
- brew install gcc
+ brew install cmake open-mpi gcc
-3. Install **Open MPI**:
-
- .. code:: sh
-
- brew install open-mpi
-
-4. Run the following commands:
+2. Run the following commands:
.. code:: sh
@@ -488,48 +534,19 @@ gcc
Build GPU Version
~~~~~~~~~~~~~~~~~
-Linux
-^^^^^
-
-On Linux a GPU version of LightGBM (``device_type=gpu``) can be built using **OpenCL**, **Boost**, **CMake** and **gcc** or **Clang**.
-
-The following dependencies should be installed before compilation:
-
-- **OpenCL** 1.2 headers and libraries, which is usually provided by GPU manufacture.
-
- The generic OpenCL ICD packages (for example, Debian package ``ocl-icd-libopencl1`` and ``ocl-icd-opencl-dev``) can also be used.
-
-- **libboost** 1.56 or later (1.61 or later is recommended).
-
- We use Boost.Compute as the interface to GPU, which is part of the Boost library since version 1.61. However, since we include the source code of Boost.Compute as a submodule, we only require the host has Boost 1.56 or later installed. We also use Boost.Align for memory allocation. Boost.Compute requires Boost.System and Boost.Filesystem to store offline kernel cache.
-
- The following Debian packages should provide necessary Boost libraries: ``libboost-dev``, ``libboost-system-dev``, ``libboost-filesystem-dev``.
-
-- **CMake**
-
-To build LightGBM GPU version, run the following commands:
-
-.. code:: sh
-
- git clone --recursive https://github.com/microsoft/LightGBM
- cd LightGBM
- cmake -B build -S . -DUSE_GPU=1
- # if you have installed NVIDIA CUDA to a customized location, you should specify paths to OpenCL headers and library like the following:
- # cmake -B build -S . -DUSE_GPU=1 -DOpenCL_LIBRARY=/usr/local/cuda/lib64/libOpenCL.so -DOpenCL_INCLUDE_DIR=/usr/local/cuda/include/
- cmake --build build
-
-**Note**: In some rare cases you may need to install OpenMP runtime library separately (use your package manager and search for ``lib[g|i]omp`` for doing this).
-
Windows
^^^^^^^
-On Windows a GPU version of LightGBM (``device_type=gpu``) can be built using **OpenCL**, **Boost**, **CMake** and **VS Build Tools** or **MinGW**.
+On Windows, a GPU version of LightGBM (``device_type=gpu``) can be built using
+
+- **OpenCL**, **Boost**, **CMake** and **VS Build Tools**;
+- **OpenCL**, **Boost**, **CMake** and **MinGW**.
If you use **MinGW**, the build procedure is similar to the build on Linux.
Following procedure is for the **MSVC** (Microsoft Visual C++) build.
-1. Install `Git for Windows`_, `CMake`_ and `VS Build Tools`_ (**VS Build Tools** is not needed if **Visual Studio** (2015 or newer) is installed).
+1. Install `Git for Windows`_, `CMake`_ and `VS Build Tools`_ (**VS Build Tools** is not needed if **Visual Studio** is installed).
2. Install **OpenCL** for Windows. The installation depends on the brand (NVIDIA, AMD, Intel) of your GPU card.
@@ -559,13 +576,68 @@ Following procedure is for the **MSVC** (Microsoft Visual C++) build.
git clone --recursive https://github.com/microsoft/LightGBM
cd LightGBM
- cmake -B build -S . -A x64 -DUSE_GPU=1 -DBOOST_ROOT=C:/local/boost_1_63_0 -DBOOST_LIBRARYDIR=C:/local/boost_1_63_0/lib64-msvc-14.0
+ cmake -B build -S . -A x64 -DUSE_GPU=ON -DBOOST_ROOT=C:/local/boost_1_63_0 -DBOOST_LIBRARYDIR=C:/local/boost_1_63_0/lib64-msvc-14.0
# if you have installed NVIDIA CUDA to a customized location, you should specify paths to OpenCL headers and library like the following:
- # cmake -B build -S . -A x64 -DUSE_GPU=1 -DBOOST_ROOT=C:/local/boost_1_63_0 -DBOOST_LIBRARYDIR=C:/local/boost_1_63_0/lib64-msvc-14.0 -DOpenCL_LIBRARY="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/lib/x64/OpenCL.lib" -DOpenCL_INCLUDE_DIR="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/include"
+ # cmake -B build -S . -A x64 -DUSE_GPU=ON -DBOOST_ROOT=C:/local/boost_1_63_0 -DBOOST_LIBRARYDIR=C:/local/boost_1_63_0/lib64-msvc-14.0 -DOpenCL_LIBRARY="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/lib/x64/OpenCL.lib" -DOpenCL_INCLUDE_DIR="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/include"
cmake --build build --target ALL_BUILD --config Release
**Note**: ``C:/local/boost_1_63_0`` and ``C:/local/boost_1_63_0/lib64-msvc-14.0`` are locations of your **Boost** binaries (assuming you've downloaded 1.63.0 version for Visual Studio 2015).
+The ``.exe`` and ``.dll`` files will be in ``LightGBM/Release`` folder.
+
+Linux
+^^^^^
+
+On Linux, a GPU version of LightGBM (``device_type=gpu``) can be built using
+
+- **CMake**, **OpenCL**, **Boost** and **gcc**;
+- **CMake**, **OpenCL**, **Boost** and **Clang**.
+
+**OpenCL** headers and libraries are usually provided by the GPU manufacturer.
+The generic OpenCL ICD packages (for example, Debian packages ``ocl-icd-libopencl1``, ``ocl-icd-opencl-dev``, ``pocl-opencl-icd``) can also be used.
+
+Required **Boost** libraries (Boost.Align, Boost.System, Boost.Filesystem, Boost.Chrono) should be provided by the following Debian packages: ``libboost-dev``, ``libboost-system-dev``, ``libboost-filesystem-dev``, ``libboost-chrono-dev``.
+
+After compilation the executable and ``.so`` files will be in ``LightGBM/`` folder.
+
+gcc
+***
+
+1. Install `CMake`_, **gcc**, **OpenCL** and **Boost**.
+
+2. Run the following commands:
+
+ .. code:: sh
+
+ git clone --recursive https://github.com/microsoft/LightGBM
+ cd LightGBM
+ cmake -B build -S . -DUSE_GPU=ON
+ # if you have installed NVIDIA CUDA to a customized location, you should specify paths to OpenCL headers and library like the following:
+ # cmake -B build -S . -DUSE_GPU=ON -DOpenCL_LIBRARY=/usr/local/cuda/lib64/libOpenCL.so -DOpenCL_INCLUDE_DIR=/usr/local/cuda/include/
+ cmake --build build -j4
+
+Clang
+*****
+
+1. Install `CMake`_, **Clang**, **OpenMP**, **OpenCL** and **Boost**.
+
+2. Run the following commands:
+
+ .. code:: sh
+
+ git clone --recursive https://github.com/microsoft/LightGBM
+ cd LightGBM
+ export CXX=clang++-14 CC=clang-14 # replace "14" with version of Clang installed on your machine
+ cmake -B build -S . -DUSE_GPU=ON
+ # if you have installed NVIDIA CUDA to a customized location, you should specify paths to OpenCL headers and library like the following:
+ # cmake -B build -S . -DUSE_GPU=ON -DOpenCL_LIBRARY=/usr/local/cuda/lib64/libOpenCL.so -DOpenCL_INCLUDE_DIR=/usr/local/cuda/include/
+ cmake --build build -j4
+
+macOS
+^^^^^
+
+The GPU version is not supported on macOS.
+
Docker
^^^^^^
@@ -574,60 +646,84 @@ Refer to `GPU Docker folder `__ of LightGBM (``device_type=gpu``) is based on OpenCL.
+The `original GPU version <#build-gpu-version>`__ of LightGBM (``device_type=gpu``) is based on OpenCL.
-The CUDA-based build (``device_type=cuda``) is a separate implementation.
+The CUDA-based version (``device_type=cuda``) is a separate implementation.
Use this version in Linux environments with an NVIDIA GPU with compute capability 6.0 or higher.
+Windows
+^^^^^^^
+
+The CUDA version is not supported on Windows.
+Use the `GPU version <#build-gpu-version>`__ (``device_type=gpu``) for GPU acceleration on Windows.
+
Linux
^^^^^
-On Linux a CUDA version of LightGBM can be built using **CUDA**, **CMake** and **gcc** or **Clang**.
+On Linux, a CUDA version of LightGBM can be built using
-The following dependencies should be installed before compilation:
+- **CMake**, **gcc** and **CUDA**;
+- **CMake**, **Clang** and **CUDA**.
-- **CUDA** 11.0 or later libraries. Please refer to `this detailed guide`_. Pay great attention to the minimum required versions of host compilers listed in the table from that guide and use only recommended versions of compilers.
+Please refer to `this detailed guide`_ for **CUDA** libraries installation.
-- **CMake**
+After compilation the executable and ``.so`` files will be in ``LightGBM/`` folder.
-To build LightGBM CUDA version, run the following commands:
+gcc
+***
-.. code:: sh
+1. Install `CMake`_, **gcc** and **CUDA**.
+
+2. Run the following commands:
- git clone --recursive https://github.com/microsoft/LightGBM
- cd LightGBM
- cmake -B build -S . -DUSE_CUDA=1
- cmake --build build -j4
+ .. code:: sh
+
+ git clone --recursive https://github.com/microsoft/LightGBM
+ cd LightGBM
+ cmake -B build -S . -DUSE_CUDA=ON
+ cmake --build build -j4
+
+Clang
+*****
+
+1. Install `CMake`_, **Clang**, **OpenMP** and **CUDA**.
+
+2. Run the following commands:
-**Note**: In some rare cases you may need to install OpenMP runtime library separately (use your package manager and search for ``lib[g|i]omp`` for doing this).
+ .. code:: sh
+
+ git clone --recursive https://github.com/microsoft/LightGBM
+ cd LightGBM
+ export CXX=clang++-14 CC=clang-14 # replace "14" with version of Clang installed on your machine
+ cmake -B build -S . -DUSE_CUDA=ON
+ cmake --build build -j4
macOS
^^^^^
The CUDA version is not supported on macOS.
-Windows
-^^^^^^^
-
-The CUDA version is not supported on Windows.
-Use the GPU version (``device_type=gpu``) for GPU acceleration on Windows.
-
Build Java Wrapper
~~~~~~~~~~~~~~~~~~
Using the following instructions you can generate a JAR file containing the LightGBM `C API <./Development-Guide.rst#c-api>`__ wrapped by **SWIG**.
+After compilation the ``.jar`` file will be in ``LightGBM/build`` folder.
+
Windows
^^^^^^^
-On Windows a Java wrapper of LightGBM can be built using **Java**, **SWIG**, **CMake** and **VS Build Tools** or **MinGW**.
+On Windows, a Java wrapper of LightGBM can be built using
+
+- **Java**, **SWIG**, **CMake** and **VS Build Tools**;
+- **Java**, **SWIG**, **CMake** and **MinGW**.
VS Build Tools
**************
-1. Install `Git for Windows`_, `CMake`_ and `VS Build Tools`_ (**VS Build Tools** is not needed if **Visual Studio** (2015 or newer) is already installed).
+1. Install `Git for Windows`_, `CMake`_ and `VS Build Tools`_ (**VS Build Tools** is not needed if **Visual Studio** is already installed).
-2. Install `SWIG`_ and **Java** (also make sure that ``JAVA_HOME`` is set properly).
+2. Install `SWIG`_ and **Java** (also make sure that ``JAVA_HOME`` environment variable is set properly).
3. Run the following commands:
@@ -638,14 +734,12 @@ VS Build Tools
cmake -B build -S . -A x64 -DUSE_SWIG=ON
cmake --build build --target ALL_BUILD --config Release
-The ``.jar`` file will be in ``LightGBM/build`` folder and the ``.dll`` files will be in ``LightGBM/Release`` folder.
-
MinGW-w64
*********
1. Install `Git for Windows`_, `CMake`_ and `MinGW-w64`_.
-2. Install `SWIG`_ and **Java** (also make sure that ``JAVA_HOME`` is set properly).
+2. Install `SWIG`_ and **Java** (also make sure that ``JAVA_HOME`` environment variable is set properly).
3. Run the following commands:
@@ -656,9 +750,7 @@ MinGW-w64
cmake -B build -S . -G "MinGW Makefiles" -DUSE_SWIG=ON
cmake --build build -j4
-The ``.jar`` file will be in ``LightGBM/build`` folder and the ``.dll`` files will be in ``LightGBM/`` folder.
-
-**Note**: You may need to run the ``cmake -B build -S . -G "MinGW Makefiles" -DUSE_SWIG=ON`` one more time if you encounter the ``sh.exe was found in your PATH`` error.
+**Note**: You may need to run the ``cmake -B build -S . -G "MinGW Makefiles" -DUSE_SWIG=ON`` one more time or add ``-DCMAKE_SH=CMAKE_SH-NOTFOUND`` to CMake flags if you encounter the ``sh.exe was found in your PATH`` error.
It is recommended to use **VS Build Tools (Visual Studio)** since it has better multithreading efficiency in **Windows** for many-core systems
(see `Question 4 <./FAQ.rst#i-am-using-windows-should-i-use-visual-studio-or-mingw-for-compiling-lightgbm>`__ and `Question 8 <./FAQ.rst#cpu-usage-is-low-like-10-in-windows-when-using-lightgbm-on-very-large-datasets-with-many-core-systems>`__).
@@ -666,9 +758,15 @@ It is recommended to use **VS Build Tools (Visual Studio)** since it has better
Linux
^^^^^
-On Linux a Java wrapper of LightGBM can be built using **Java**, **SWIG**, **CMake** and **gcc** or **Clang**.
+On Linux, a Java wrapper of LightGBM can be built using
+
+- **CMake**, **gcc**, **Java** and **SWIG**;
+- **CMake**, **Clang**, **Java** and **SWIG**.
+
+gcc
+***
-1. Install `CMake`_, `SWIG`_ and **Java** (also make sure that ``JAVA_HOME`` is set properly).
+1. Install `CMake`_, **gcc**, `SWIG`_ and **Java** (also make sure that ``JAVA_HOME`` environment variable is set properly).
2. Run the following commands:
@@ -679,34 +777,40 @@ On Linux a Java wrapper of LightGBM can be built using **Java**, **SWIG**, **CMa
cmake -B build -S . -DUSE_SWIG=ON
cmake --build build -j4
-**Note**: In some rare cases you may need to install OpenMP runtime library separately (use your package manager and search for ``lib[g|i]omp`` for doing this).
+Clang
+*****
-macOS
-^^^^^
+1. Install `CMake`_, **Clang**, **OpenMP**, `SWIG`_ and **Java** (also make sure that ``JAVA_HOME`` environment variable is set properly).
-On macOS a Java wrapper of LightGBM can be built using **Java**, **SWIG**, **CMake** and **Apple Clang** or **gcc**.
+2. Run the following commands:
-First, install `SWIG`_ and **Java** (also make sure that ``JAVA_HOME`` is set properly).
-Then, either follow the **Apple Clang** or **gcc** installation instructions below.
+ .. code:: sh
-Apple Clang
-***********
+ git clone --recursive https://github.com/microsoft/LightGBM
+ cd LightGBM
+ export CXX=clang++-14 CC=clang-14 # replace "14" with version of Clang installed on your machine
+ cmake -B build -S . -DUSE_SWIG=ON
+ cmake --build build -j4
-Only **Apple Clang** version 8.1 or higher is supported.
+macOS
+^^^^^
-1. Install `CMake`_ :
+On macOS, a Java wrapper of LightGBM can be built using
- .. code:: sh
+- **CMake**, **Java**, **SWIG** and **Apple Clang**;
+- **CMake**, **Java**, **SWIG** and **gcc**.
- brew install cmake
+Apple Clang
+***********
-2. Install **OpenMP**:
+1. Install `CMake`_, **Java** (also make sure that ``JAVA_HOME`` environment variable is set properly), `SWIG`_ and **OpenMP**:
.. code:: sh
- brew install libomp
+ brew install cmake openjdk swig libomp
+ export JAVA_HOME="$(brew --prefix openjdk)/libexec/openjdk.jdk/Contents/Home/"
-3. Run the following commands:
+2. Run the following commands:
.. code:: sh
@@ -718,19 +822,14 @@ Only **Apple Clang** version 8.1 or higher is supported.
gcc
***
-1. Install `CMake`_ :
+1. Install `CMake`_, **Java** (also make sure that ``JAVA_HOME`` environment variable is set properly), `SWIG`_ and **gcc**:
.. code:: sh
- brew install cmake
-
-2. Install **gcc**:
-
- .. code:: sh
-
- brew install gcc
+ brew install cmake openjdk swig gcc
+ export JAVA_HOME="$(brew --prefix openjdk)/libexec/openjdk.jdk/Contents/Home/"
-3. Run the following commands:
+2. Run the following commands:
.. code:: sh
@@ -740,15 +839,31 @@ gcc
cmake -B build -S . -DUSE_SWIG=ON
cmake --build build -j4
+Build Python-package
+~~~~~~~~~~~~~~~~~~~~
+
+Refer to `Python-package folder <https://github.com/microsoft/LightGBM/tree/master/python-package>`__.
+
+Build R-package
+~~~~~~~~~~~~~~~
+
+Refer to `R-package folder <https://github.com/microsoft/LightGBM/tree/master/R-package>`__.
+
Build C++ Unit Tests
~~~~~~~~~~~~~~~~~~~~
Windows
^^^^^^^
-On Windows, C++ unit tests of LightGBM can be built using **CMake** and **VS Build Tools**.
+On Windows, C++ unit tests of LightGBM can be built using
+
+- **CMake** and **VS Build Tools**;
+- **CMake** and **MinGW**.
+
+VS Build Tools
+**************
-1. Install `Git for Windows`_, `CMake`_ and `VS Build Tools`_ (**VS Build Tools** is not needed if **Visual Studio** (2015 or newer) is already installed).
+1. Install `Git for Windows`_, `CMake`_ and `VS Build Tools`_ (**VS Build Tools** is not needed if **Visual Studio** is already installed).
2. Run the following commands:
@@ -756,17 +871,43 @@ On Windows, C++ unit tests of LightGBM can be built using **CMake** and **VS Bui
git clone --recursive https://github.com/microsoft/LightGBM
cd LightGBM
- cmake -B build -S . -A x64 -DBUILD_CPP_TEST=ON -DUSE_OPENMP=OFF
+ cmake -B build -S . -A x64 -DBUILD_CPP_TEST=ON
cmake --build build --target testlightgbm --config Debug
The ``.exe`` file will be in ``LightGBM/Debug`` folder.
+MinGW-w64
+*********
+
+1. Install `Git for Windows`_, `CMake`_ and `MinGW-w64`_.
+
+2. Run the following commands:
+
+ .. code:: console
+
+ git clone --recursive https://github.com/microsoft/LightGBM
+ cd LightGBM
+ cmake -B build -S . -G "MinGW Makefiles" -DBUILD_CPP_TEST=ON
+ cmake --build build --target testlightgbm -j4
+
+The ``.exe`` file will be in ``LightGBM/`` folder.
+
+**Note**: You may need to run the ``cmake -B build -S . -G "MinGW Makefiles" -DBUILD_CPP_TEST=ON`` one more time or add ``-DCMAKE_SH=CMAKE_SH-NOTFOUND`` to CMake flags if you encounter the ``sh.exe was found in your PATH`` error.
+
Linux
^^^^^
-On Linux a C++ unit tests of LightGBM can be built using **CMake** and **gcc** or **Clang**.
+On Linux, C++ unit tests of LightGBM can be built using
+
+- **CMake** and **gcc**;
+- **CMake** and **Clang**.
+
+After compilation the executable file will be in ``LightGBM/`` folder.
+
+gcc
+***
-1. Install `CMake`_.
+1. Install `CMake`_ and **gcc**.
2. Run the following commands:
@@ -774,24 +915,42 @@ On Linux a C++ unit tests of LightGBM can be built using **CMake** and **gcc** o
git clone --recursive https://github.com/microsoft/LightGBM
cd LightGBM
- cmake -B build -S . -DBUILD_CPP_TEST=ON -DUSE_OPENMP=OFF
+ cmake -B build -S . -DBUILD_CPP_TEST=ON
+ cmake --build build --target testlightgbm -j4
+
+Clang
+*****
+
+1. Install `CMake`_, **Clang** and **OpenMP**.
+
+2. Run the following commands:
+
+ .. code:: sh
+
+ git clone --recursive https://github.com/microsoft/LightGBM
+ cd LightGBM
+ export CXX=clang++-14 CC=clang-14 # replace "14" with version of Clang installed on your machine
+ cmake -B build -S . -DBUILD_CPP_TEST=ON
cmake --build build --target testlightgbm -j4
macOS
^^^^^
-On macOS a C++ unit tests of LightGBM can be built using **CMake** and **Apple Clang** or **gcc**.
+On macOS, C++ unit tests of LightGBM can be built using
+
+- **CMake** and **Apple Clang**;
+- **CMake** and **gcc**.
+
+After compilation the executable file will be in ``LightGBM/`` folder.
Apple Clang
***********
-Only **Apple Clang** version 8.1 or higher is supported.
-
-1. Install `CMake`_ :
+1. Install `CMake`_ and **OpenMP**:
.. code:: sh
- brew install cmake
+ brew install cmake libomp
2. Run the following commands:
@@ -799,42 +958,32 @@ Only **Apple Clang** version 8.1 or higher is supported.
git clone --recursive https://github.com/microsoft/LightGBM
cd LightGBM
- cmake -B build -S . -DBUILD_CPP_TEST=ON -DUSE_OPENMP=OFF
+ cmake -B build -S . -DBUILD_CPP_TEST=ON
cmake --build build --target testlightgbm -j4
gcc
***
-1. Install `CMake`_ :
+1. Install `CMake`_ and **gcc**:
.. code:: sh
- brew install cmake
-
-2. Install **gcc**:
+ brew install cmake gcc
- .. code:: sh
-
- brew install gcc
-
-3. Run the following commands:
+2. Run the following commands:
.. code:: sh
git clone --recursive https://github.com/microsoft/LightGBM
cd LightGBM
export CXX=g++-7 CC=gcc-7 # replace "7" with version of gcc installed on your machine
- cmake -B build -S . -DBUILD_CPP_TEST=ON -DUSE_OPENMP=OFF
+ cmake -B build -S . -DBUILD_CPP_TEST=ON
cmake --build build --target testlightgbm -j4
.. |download artifacts| image:: ./_static/images/artifacts-not-available.svg
:target: https://lightgbm.readthedocs.io/en/latest/Installation-Guide.html
-.. _Python-package: https://github.com/microsoft/LightGBM/tree/master/python-package
-
-.. _R-package: https://github.com/microsoft/LightGBM/tree/master/R-package
-
.. _Visual Studio: https://visualstudio.microsoft.com/downloads/
.. _Git for Windows: https://git-scm.com/download/win
@@ -864,3 +1013,5 @@ gcc
.. _this detailed guide: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html
.. _following docs: https://github.com/google/sanitizers/wiki
+
+.. _Ninja: https://ninja-build.org
diff --git a/docs/Parameters.rst b/docs/Parameters.rst
index 1f80a13d5731..b44d90ecec10 100644
--- a/docs/Parameters.rst
+++ b/docs/Parameters.rst
@@ -35,7 +35,7 @@ For example, in Python:
.. code-block:: python
- # use learning rate of 0.07, becase 'learning_rate'
+ # use learning rate of 0.07, because 'learning_rate'
# is the primary parameter name
lgb.train(
params={
diff --git a/docs/_static/js/script.js b/docs/_static/js/script.js
index 3f129501e06f..c6d21713fe5c 100644
--- a/docs/_static/js/script.js
+++ b/docs/_static/js/script.js
@@ -15,17 +15,19 @@ $(() => {
/* Collapse specified sections in the installation guide */
if (window.location.pathname.toLocaleLowerCase().indexOf("installation-guide") !== -1) {
$(
- '',
+ '',
).appendTo("body");
- const collapsable = [
+ const collapsible = [
"#build-threadless-version-not-recommended",
"#build-mpi-version",
"#build-gpu-version",
"#build-cuda-version",
"#build-java-wrapper",
+ "#build-python-package",
+ "#build-r-package",
"#build-c-unit-tests",
];
- $.each(collapsable, (_, val) => {
+ $.each(collapsible, (_, val) => {
const header = `${val} > :header:first`;
const content = `${val} :not(:header:first)`;
$(header).addClass("closed");
diff --git a/examples/lambdarank/train.conf b/examples/lambdarank/train.conf
index 2aa2113b40d4..f007dcd6fe66 100644
--- a/examples/lambdarank/train.conf
+++ b/examples/lambdarank/train.conf
@@ -64,7 +64,7 @@ num_leaves = 31
# alias: tree
tree_learner = serial
-# number of threads for multi-threading. One thread will use one CPU, defalut is setted to #cpu.
+# number of threads for multi-threading. One thread will use one CPU, default is set to #cpu.
# num_threads = 8
# feature sub-sample, will random select 80% feature to train on each iteration
diff --git a/examples/regression/train.conf b/examples/regression/train.conf
index cd910af61dcf..992bc6c9ab53 100644
--- a/examples/regression/train.conf
+++ b/examples/regression/train.conf
@@ -20,7 +20,7 @@ objective = regression
# binary_error
metric = l2
-# frequence for metric output
+# frequency for metric output
metric_freq = 1
# true if need output metric for training data, alias: tranining_metric, train_metric
@@ -36,12 +36,12 @@ max_bin = 255
# forcedbins_filename = forced_bins.json
# training data
-# if exsting weight file, should name to "regression.train.weight"
+# if existing weight file, should name to "regression.train.weight"
# alias: train_data, train
data = regression.train
# validation data, support multi validation data, separated by ','
-# if exsting weight file, should name to "regression.test.weight"
+# if existing weight file, should name to "regression.test.weight"
# alias: valid, test, test_data,
valid_data = regression.test
@@ -62,7 +62,7 @@ num_leaves = 31
# alias: tree
tree_learner = serial
-# number of threads for multi-threading. One thread will use one CPU, default is setted to #cpu.
+# number of threads for multi-threading. One thread will use one CPU, default is set to #cpu.
# num_threads = 8
# feature sub-sample, will random select 80% feature to train on each iteration
@@ -72,7 +72,7 @@ feature_fraction = 0.9
# Support bagging (data sub-sample), will perform bagging every 5 iterations
bagging_freq = 5
-# Bagging farction, will random select 80% data on bagging
+# Bagging fraction, will random select 80% data on bagging
# alias: sub_row
bagging_fraction = 0.8
diff --git a/include/LightGBM/bin.h b/include/LightGBM/bin.h
index a33fcfa9c45c..5826f2387102 100644
--- a/include/LightGBM/bin.h
+++ b/include/LightGBM/bin.h
@@ -9,6 +9,7 @@
#include
#include
+#include
#include
#include
#include
diff --git a/include/LightGBM/cuda/cuda_algorithms.hpp b/include/LightGBM/cuda/cuda_algorithms.hpp
index f79fc57e4f42..abda07b1582f 100644
--- a/include/LightGBM/cuda/cuda_algorithms.hpp
+++ b/include/LightGBM/cuda/cuda_algorithms.hpp
@@ -115,7 +115,7 @@ __device__ __forceinline__ T ShuffleReduceSumWarp(T value, const data_size_t len
return value;
}
-// reduce values from an 1-dimensional block (block size must be no greather than 1024)
+// reduce values from an 1-dimensional block (block size must be no greater than 1024)
template
__device__ __forceinline__ T ShuffleReduceSum(T value, T* shared_mem_buffer, const size_t len) {
const uint32_t warpLane = threadIdx.x % warpSize;
@@ -145,7 +145,7 @@ __device__ __forceinline__ T ShuffleReduceMaxWarp(T value, const data_size_t len
return value;
}
-// reduce values from an 1-dimensional block (block size must be no greather than 1024)
+// reduce values from an 1-dimensional block (block size must be no greater than 1024)
template
__device__ __forceinline__ T ShuffleReduceMax(T value, T* shared_mem_buffer, const size_t len) {
const uint32_t warpLane = threadIdx.x % warpSize;
@@ -196,7 +196,7 @@ __device__ __forceinline__ T ShuffleReduceMinWarp(T value, const data_size_t len
return value;
}
-// reduce values from an 1-dimensional block (block size must be no greather than 1024)
+// reduce values from an 1-dimensional block (block size must be no greater than 1024)
template
__device__ __forceinline__ T ShuffleReduceMin(T value, T* shared_mem_buffer, const size_t len) {
const uint32_t warpLane = threadIdx.x % warpSize;
diff --git a/include/LightGBM/cuda/cuda_column_data.hpp b/include/LightGBM/cuda/cuda_column_data.hpp
index 314a178859c6..8875cd151d7d 100644
--- a/include/LightGBM/cuda/cuda_column_data.hpp
+++ b/include/LightGBM/cuda/cuda_column_data.hpp
@@ -13,6 +13,7 @@
#include
#include
+#include
#include
namespace LightGBM {
diff --git a/include/LightGBM/cuda/cuda_row_data.hpp b/include/LightGBM/cuda/cuda_row_data.hpp
index 1d4cb2f73b1e..85da72bc083d 100644
--- a/include/LightGBM/cuda/cuda_row_data.hpp
+++ b/include/LightGBM/cuda/cuda_row_data.hpp
@@ -15,6 +15,7 @@
#include
#include
+#include
#include
#define COPY_SUBROW_BLOCK_SIZE_ROW_DATA (1024)
diff --git a/include/LightGBM/dataset.h b/include/LightGBM/dataset.h
index 220a1f9f009c..c2a4b62296f2 100644
--- a/include/LightGBM/dataset.h
+++ b/include/LightGBM/dataset.h
@@ -15,6 +15,7 @@
#include
#include
+#include
#include
#include
#include