diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 000000000000..c2e19847a4fc
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,11 @@
+version: 2
+formats:
+  - pdf
+python:
+  version: 3
+  install:
+    - requirements: docs/requirements.txt
+sphinx:
+  builder: html
+  configuration: docs/conf.py
+  fail_on_warning: true
diff --git a/R-package/.Rbuildignore b/R-package/.Rbuildignore
index 193677ab5a01..1fb7a359298e 100644
--- a/R-package/.Rbuildignore
+++ b/R-package/.Rbuildignore
@@ -1,5 +1,7 @@
 ^build_package.R$
 \.gitkeep$
+^docs$
+^_pkgdown\.yml$
 
 # Objects created by compilation
 \.o$
diff --git a/R-package/DESCRIPTION b/R-package/DESCRIPTION
index 4e3b6939e2b1..0a90b2eed319 100755
--- a/R-package/DESCRIPTION
+++ b/R-package/DESCRIPTION
@@ -18,6 +18,7 @@ Description: Tree based algorithms can be improved by introducing boosting frame
        5. Capable of handling large-scale data.
     In recognition of these advantages, LightGBM has being widely-used in many winning solutions of machine learning competitions.
     Comparison experiments on public datasets suggest that LightGBM can outperform existing boosting frameworks on both efficiency and accuracy, with significantly lower memory consumption. In addition, parallel experiments suggest that in certain circumstances, LightGBM can achieve a linear speed-up in training time by using multiple machines.
+Encoding: UTF-8
 License: MIT + file LICENSE
 URL: https://github.com/Microsoft/LightGBM
 BugReports: https://github.com/Microsoft/LightGBM/issues
diff --git a/R-package/R/lgb.Booster.R b/R-package/R/lgb.Booster.R
index d52cae9f08c5..9e22fa5e9b64 100644
--- a/R-package/R/lgb.Booster.R
+++ b/R-package/R/lgb.Booster.R
@@ -644,11 +644,11 @@
 #' valids <- list(test = dtest)
 #' model <- lgb.train(params,
 #'                    dtrain,
-#'                    100,
+#'                    10,
 #'                    valids,
 #'                    min_data = 1,
 #'                    learning_rate = 1,
-#'                    early_stopping_rounds = 10)
+#'                    early_stopping_rounds = 5)
 #' preds <- predict(model, test$data)
 #'
 #' @rdname predict.lgb.Booster
@@ -701,11 +701,11 @@ predict.lgb.Booster <- function(object,
 #' valids <- list(test = dtest)
 #' model <- lgb.train(params,
 #'                    dtrain,
-#'                    100,
+#'                    10,
 #'                    valids,
 #'                    min_data = 1,
 #'                    learning_rate = 1,
-#'                    early_stopping_rounds = 10)
+#'                    early_stopping_rounds = 5)
 #' lgb.save(model, "model.txt")
 #' load_booster <- lgb.load(filename = "model.txt")
 #' model_string <- model$save_model_to_string(NULL) # saves best iteration
@@ -759,11 +759,11 @@ lgb.load <- function(filename = NULL, model_str = NULL){
 #' valids <- list(test = dtest)
 #' model <- lgb.train(params,
 #'                    dtrain,
-#'                    100,
+#'                    10,
 #'                    valids,
 #'                    min_data = 1,
 #'                    learning_rate = 1,
-#'                    early_stopping_rounds = 10)
+#'                    early_stopping_rounds = 5)
 #' lgb.save(model, "model.txt")
 #'
 #' @rdname lgb.save
@@ -806,11 +806,11 @@ lgb.save <- function(booster, filename, num_iteration = NULL){
 #' valids <- list(test = dtest)
 #' model <- lgb.train(params,
 #'                    dtrain,
-#'                    100,
+#'                    10,
 #'                    valids,
 #'                    min_data = 1,
 #'                    learning_rate = 1,
-#'                    early_stopping_rounds = 10)
+#'                    early_stopping_rounds = 5)
 #' json_model <- lgb.dump(model)
 #'
 #' @rdname lgb.dump
@@ -850,13 +850,12 @@ lgb.dump <- function(booster, num_iteration = NULL){
 #' valids <- list(test = dtest)
 #' model <- lgb.train(params,
 #'                    dtrain,
-#'                    100,
+#'                    10,
 #'                    valids,
 #'                    min_data = 1,
 #'                    learning_rate = 1,
-#'                    early_stopping_rounds = 10)
+#'                    early_stopping_rounds = 5)
 #' lgb.get.eval.result(model, "test", "l2")
-#'
 #' @rdname lgb.get.eval.result
 #' @export
 lgb.get.eval.result <- function(booster, data_name, eval_name, iters = NULL, is_err = FALSE) {
diff --git a/R-package/R/lgb.Dataset.R b/R-package/R/lgb.Dataset.R
index 5c4dddae546f..345eb22f95e0 100644
--- a/R-package/R/lgb.Dataset.R
+++ b/R-package/R/lgb.Dataset.R
@@ -1,4 +1,3 @@
-
 #' @importFrom methods is
 #' @importFrom R6 R6Class
 Dataset <- R6::R6Class(
@@ -1057,7 +1056,6 @@ lgb.Dataset.set.reference <- function(dataset, reference) {
 #' @return passed dataset
 #'
 #' @examples
-#'
 #' library(lightgbm)
 #' data(agaricus.train, package = "lightgbm")
 #' train <- agaricus.train
diff --git a/R-package/R/lgb.Predictor.R b/R-package/R/lgb.Predictor.R
index faec8c2af569..b44ab5f08193 100644
--- a/R-package/R/lgb.Predictor.R
+++ b/R-package/R/lgb.Predictor.R
@@ -1,4 +1,3 @@
-
 #' @importFrom methods is
 #' @importFrom R6 R6Class
 Predictor <- R6::R6Class(
diff --git a/R-package/R/lgb.cv.R b/R-package/R/lgb.cv.R
index bacefd4f92f2..00f9d1542835 100644
--- a/R-package/R/lgb.cv.R
+++ b/R-package/R/lgb.cv.R
@@ -64,10 +64,10 @@ CVBooster <- R6::R6Class(
 #' model <- lgb.cv(params,
 #'                 dtrain,
 #'                 10,
-#'                 nfold = 5,
+#'                 nfold = 3,
 #'                 min_data = 1,
 #'                 learning_rate = 1,
-#'                 early_stopping_rounds = 10)
+#'                 early_stopping_rounds = 5)
 #' @export
 lgb.cv <- function(params = list(),
                    data,
diff --git a/R-package/R/lgb.importance.R b/R-package/R/lgb.importance.R
index 57af3a821471..79f626b15b12 100644
--- a/R-package/R/lgb.importance.R
+++ b/R-package/R/lgb.importance.R
@@ -22,10 +22,9 @@
 #' dtrain <- lgb.Dataset(train$data, label = train$label)
 #'
 #' params <- list(objective = "binary",
-#'               learning_rate = 0.01, num_leaves = 63, max_depth = -1,
-#'               min_data_in_leaf = 1, min_sum_hessian_in_leaf = 1)
-#' model <- lgb.train(params, dtrain, 20)
-#' model <- lgb.train(params, dtrain, 20)
+#'                learning_rate = 0.01, num_leaves = 63, max_depth = -1,
+#'                min_data_in_leaf = 1, min_sum_hessian_in_leaf = 1)
+#' model <- lgb.train(params, dtrain, 10)
 #'
 #' tree_imp1 <- lgb.importance(model, percentage = TRUE)
 #' tree_imp2 <- lgb.importance(model, percentage = FALSE)
diff --git a/R-package/R/lgb.interprete.R b/R-package/R/lgb.interprete.R
index 4fb72be9ce69..839d6f084703 100644
--- a/R-package/R/lgb.interprete.R
+++ b/R-package/R/lgb.interprete.R
@@ -34,7 +34,7 @@
 #'     , min_data_in_leaf = 1
 #'     , min_sum_hessian_in_leaf = 1
 #' )
-#' model <- lgb.train(params, dtrain, 20)
+#' model <- lgb.train(params, dtrain, 10)
 #'
 #' tree_interpretation <- lgb.interprete(model, test$data, 1:5)
 #'
diff --git a/R-package/R/lgb.model.dt.tree.R b/R-package/R/lgb.model.dt.tree.R
index 70355b26faa3..1717de8947af 100644
--- a/R-package/R/lgb.model.dt.tree.R
+++ b/R-package/R/lgb.model.dt.tree.R
@@ -36,10 +36,9 @@
 #' dtrain <- lgb.Dataset(train$data, label = train$label)
 #'
 #' params <- list(objective = "binary",
-#'               learning_rate = 0.01, num_leaves = 63, max_depth = -1,
-#'               min_data_in_leaf = 1, min_sum_hessian_in_leaf = 1)
-#' model <- lgb.train(params, dtrain, 20)
-#' model <- lgb.train(params, dtrain, 20)
+#'                learning_rate = 0.01, num_leaves = 63, max_depth = -1,
+#'                min_data_in_leaf = 1, min_sum_hessian_in_leaf = 1)
+#' model <- lgb.train(params, dtrain, 10)
 #'
 #' tree_dt <- lgb.model.dt.tree(model)
 #'
diff --git a/R-package/R/lgb.plot.importance.R b/R-package/R/lgb.plot.importance.R
index 260bb16c3822..5e78487b0648 100644
--- a/R-package/R/lgb.plot.importance.R
+++ b/R-package/R/lgb.plot.importance.R
@@ -30,7 +30,7 @@
 #'     , min_sum_hessian_in_leaf = 1
 #' )
 #'
-#' model <- lgb.train(params, dtrain, 20)
+#' model <- lgb.train(params, dtrain, 10)
 #'
 #' tree_imp <- lgb.importance(model, percentage = TRUE)
 #' lgb.plot.importance(tree_imp, top_n = 10, measure = "Gain")
diff --git a/R-package/R/lgb.plot.interpretation.R b/R-package/R/lgb.plot.interpretation.R
index e58e438f4426..ef3a9dbe9823 100644
--- a/R-package/R/lgb.plot.interpretation.R
+++ b/R-package/R/lgb.plot.interpretation.R
@@ -27,10 +27,9 @@
 #' test <- agaricus.test
 #'
 #' params <- list(objective = "binary",
-#'               learning_rate = 0.01, num_leaves = 63, max_depth = -1,
-#'               min_data_in_leaf = 1, min_sum_hessian_in_leaf = 1)
-#' model <- lgb.train(params, dtrain, 20)
-#' model <- lgb.train(params, dtrain, 20)
+#'                learning_rate = 0.01, num_leaves = 63, max_depth = -1,
+#'                min_data_in_leaf = 1, min_sum_hessian_in_leaf = 1)
+#' model <- lgb.train(params, dtrain, 10)
 #'
 #' tree_interpretation <- lgb.interprete(model, test$data, 1:5)
 #' lgb.plot.interpretation(tree_interpretation[[1]], top_n = 10)
diff --git a/R-package/R/lgb.prepare.R b/R-package/R/lgb.prepare.R
index 2f556ef42289..e0f30d619be2 100644
--- a/R-package/R/lgb.prepare.R
+++ b/R-package/R/lgb.prepare.R
@@ -26,6 +26,7 @@
 #' # $ Petal.Width : num  0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
 #' # $ Species     : num  1 1 1 1 1 1 1 1 1 1 ...
 #'
+#' \dontrun{
 #' # When lightgbm package is installed, and you do not want to load it
 #' # You can still use the function!
 #' lgb.unloader()
@@ -36,6 +37,7 @@
 #' # $ Petal.Length: num  1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
 #' # $ Petal.Width : num  0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
 #' # $ Species     : num  1 1 1 1 1 1 1 1 1 1 ...
+#' }
 #'
 #' @export
 lgb.prepare <- function(data) {
diff --git a/R-package/R/lgb.prepare2.R b/R-package/R/lgb.prepare2.R
index afc8c1599375..b0730fb7cb2f 100644
--- a/R-package/R/lgb.prepare2.R
+++ b/R-package/R/lgb.prepare2.R
@@ -27,6 +27,7 @@
 #' # $ Petal.Width : num  0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
 #' # $ Species     : int  1 1 1 1 1 1 1 1 1 1 ...
 #'
+#' \dontrun{
 #' # When lightgbm package is installed, and you do not want to load it
 #' # You can still use the function!
 #' lgb.unloader()
@@ -37,6 +38,7 @@
 #' # $ Petal.Length: num  1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
 #' # $ Petal.Width : num  0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
 #' # $ Species     : int  1 1 1 1 1 1 1 1 1 1 ...
+#' }
 #'
 #' @export
 lgb.prepare2 <- function(data) {
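The roxygen example edits above all follow one pattern: training budgets shrink (100 boosting rounds become 10, early_stopping_rounds drops from 10 to 5, nfold from 5 to 3) so that pkgdown can execute every example while it renders the reference pages. Assembled from those examples, a condensed end-to-end version reads as follows; this is a sketch that assumes nothing beyond the agaricus data bundled with the package:

    library(lightgbm)
    data(agaricus.train, package = "lightgbm")
    data(agaricus.test, package = "lightgbm")
    train <- agaricus.train
    test <- agaricus.test
    dtrain <- lgb.Dataset(train$data, label = train$label)
    dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label)
    params <- list(objective = "regression", metric = "l2")
    valids <- list(test = dtest)
    model <- lgb.train(params,
                       dtrain,
                       10,  # was 100; the smaller budget keeps doc builds fast
                       valids,
                       min_data = 1,
                       learning_rate = 1,
                       early_stopping_rounds = 5)  # was 10
    lgb.get.eval.result(model, "test", "l2")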
diff --git a/R-package/R/lgb.train.R b/R-package/R/lgb.train.R
index 04bc425657b2..83a688620c84 100644
--- a/R-package/R/lgb.train.R
+++ b/R-package/R/lgb.train.R
@@ -39,12 +39,11 @@
 #' valids <- list(test = dtest)
 #' model <- lgb.train(params,
 #'                    dtrain,
-#'                    100,
+#'                    10,
 #'                    valids,
 #'                    min_data = 1,
 #'                    learning_rate = 1,
-#'                    early_stopping_rounds = 10)
-#'
+#'                    early_stopping_rounds = 5)
 #' @export
 lgb.train <- function(params = list(),
                       data,
diff --git a/R-package/R/lgb.unloader.R b/R-package/R/lgb.unloader.R
index d3f68568701e..35139dffed2e 100644
--- a/R-package/R/lgb.unloader.R
+++ b/R-package/R/lgb.unloader.R
@@ -20,17 +20,20 @@
 #' valids <- list(test = dtest)
 #' model <- lgb.train(params,
 #'                    dtrain,
-#'                    100,
+#'                    10,
 #'                    valids,
 #'                    min_data = 1,
 #'                    learning_rate = 1,
-#'                    early_stopping_rounds = 10)
+#'                    early_stopping_rounds = 5)
+#'
+#' \dontrun{
 #' lgb.unloader(restore = FALSE, wipe = FALSE, envir = .GlobalEnv)
 #' rm(model, dtrain, dtest) # Not needed if wipe = TRUE
 #' gc() # Not needed if wipe = TRUE
 #'
 #' library(lightgbm)
 #' # Do whatever you want again with LightGBM without object clashing
+#' }
 #'
 #' @export
 lgb.unloader <- function(restore = TRUE, wipe = FALSE, envir = .GlobalEnv) {
diff --git a/R-package/R/lightgbm.R b/R-package/R/lightgbm.R
index 412e28ebbaa0..944a2c8af5d7 100644
--- a/R-package/R/lightgbm.R
+++ b/R-package/R/lightgbm.R
@@ -1,4 +1,3 @@
-
 #' @name lgb_shared_params
 #' @title Shared parameter docs
 #' @description Parameter docs shared by \code{lgb.train}, \code{lgb.cv}, and \code{lightgbm}
diff --git a/R-package/R/readRDS.lgb.Booster.R b/R-package/R/readRDS.lgb.Booster.R
index 37e66b00b69e..dbc5653a505e 100644
--- a/R-package/R/readRDS.lgb.Booster.R
+++ b/R-package/R/readRDS.lgb.Booster.R
@@ -19,11 +19,11 @@
 #' valids <- list(test = dtest)
 #' model <- lgb.train(params,
 #'                    dtrain,
-#'                    100,
+#'                    10,
 #'                    valids,
 #'                    min_data = 1,
 #'                    learning_rate = 1,
-#'                    early_stopping_rounds = 10)
+#'                    early_stopping_rounds = 5)
 #' saveRDS.lgb.Booster(model, "model.rds")
 #' new_model <- readRDS.lgb.Booster("model.rds")
 #'
diff --git a/R-package/R/saveRDS.lgb.Booster.R b/R-package/R/saveRDS.lgb.Booster.R
index 66e6ec43b4ef..9cb4c2ad96ff 100644
--- a/R-package/R/saveRDS.lgb.Booster.R
+++ b/R-package/R/saveRDS.lgb.Booster.R
@@ -25,11 +25,11 @@
 #' model <- lgb.train(
 #'     params
 #'     , dtrain
-#'     , 100
+#'     , 10
 #'     , valids
 #'     , min_data = 1
 #'     , learning_rate = 1
-#'     , early_stopping_rounds = 10
+#'     , early_stopping_rounds = 5
 #' )
 #' saveRDS.lgb.Booster(model, "model.rds")
 #' @export
diff --git a/R-package/README.md b/R-package/README.md
index 1b9531d1533a..e3be126a0d53 100644
--- a/R-package/README.md
+++ b/R-package/README.md
@@ -116,12 +116,12 @@ You may also read [Microsoft/LightGBM#912](https://github.com/microsoft/LightGBM
 Examples
 --------
 
-Please visit [demo](demo):
-
-* [Basic walkthrough of wrappers](demo/basic_walkthrough.R)
-* [Boosting from existing prediction](demo/boost_from_prediction.R)
-* [Early Stopping](demo/early_stopping.R)
-* [Cross Validation](demo/cross_validation.R)
-* [Multiclass Training/Prediction](demo/multiclass.R)
-* [Leaf (in)Stability](demo/leaf_stability.R)
-* [Weight-Parameter Adjustment Relationship](demo/weight_param.R)
+Please visit [demo](https://github.com/microsoft/LightGBM/tree/master/R-package/demo):
+
+* [Basic walkthrough of wrappers](https://github.com/microsoft/LightGBM/blob/master/R-package/demo/basic_walkthrough.R)
+* [Boosting from existing prediction](https://github.com/microsoft/LightGBM/blob/master/R-package/demo/boost_from_prediction.R)
+* [Early Stopping](https://github.com/microsoft/LightGBM/blob/master/R-package/demo/early_stopping.R)
+* [Cross Validation](https://github.com/microsoft/LightGBM/blob/master/R-package/demo/cross_validation.R)
+* [Multiclass Training/Prediction](https://github.com/microsoft/LightGBM/blob/master/R-package/demo/multiclass.R)
+* [Leaf (in)Stability](https://github.com/microsoft/LightGBM/blob/master/R-package/demo/leaf_stability.R)
+* [Weight-Parameter Adjustment Relationship](https://github.com/microsoft/LightGBM/blob/master/R-package/demo/weight_param.R)
diff --git a/R-package/_pkgdown.yml b/R-package/_pkgdown.yml
new file mode 100644
index 000000000000..37318c01113f
--- /dev/null
+++ b/R-package/_pkgdown.yml
@@ -0,0 +1,88 @@
+template:
+  params:
+    bootswatch: cerulean
+
+site:
+  root: ''
+  title: LightGBM, Light Gradient Boosting Machine
+
+authors:
+  Guolin Ke:
+    href: https://github.com/guolinke
+    html: Guolin Ke
+  Damien Soukhavong:
+    href: https://github.com/Laurae2
+    html: Damien Soukhavong
+  Yachen Yan:
+    href: https://github.com/yanyachen
+    html: Yachen Yan
+  James Lamb:
+    href: https://github.com/jameslamb
+    html: James Lamb
+
+navbar:
+  title: LightGBM
+  type: default
+  left:
+    - icon: fa-reply fa-lg
+      href: ../
+    - icon: fa-home fa-lg
+      href: index.html
+    - text: Reference
+      href: reference/index.html
+  right:
+    - icon: fa-github fa-lg
+      href: https://github.com/microsoft/LightGBM/tree/master/R-package
+
+reference:
+  - title: Datasets
+    desc: Datasets included with the R package
+    contents:
+    - '`agaricus.train`'
+    - '`agaricus.test`'
+    - '`bank`'
+  - title: Data Input / Output
+    desc: Data I/O required for LightGBM
+    contents:
+    - '`dim.lgb.Dataset`'
+    - '`dimnames.lgb.Dataset`'
+    - '`getinfo`'
+    - '`setinfo`'
+    - '`slice`'
+    - '`lgb.Dataset`'
+    - '`lgb.Dataset.construct`'
+    - '`lgb.Dataset.create.valid`'
+    - '`lgb.Dataset.save`'
+    - '`lgb.Dataset.set.categorical`'
+    - '`lgb.Dataset.set.reference`'
+  - title: Machine Learning
+    desc: Train models with LightGBM
+    contents:
+    - '`lgb.prepare`'
+    - '`lgb.prepare2`'
+    - '`lgb.prepare_rules`'
+    - '`lgb.prepare_rules2`'
+    - '`lgb.train`'
+    - '`lgb.cv`'
+  - title: Saving / Loading Models
+    desc: Save and load LightGBM models
+    contents:
+    - '`lgb.dump`'
+    - '`lgb.save`'
+    - '`lgb.load`'
+    - '`lgb.model.dt.tree`'
+    - '`predict.lgb.Booster`'
+    - '`saveRDS.lgb.Booster`'
+    - '`readRDS.lgb.Booster`'
+  - title: Predictive Analysis
+    desc: Analyze your predictions
+    contents:
+    - '`lgb.get.eval.result`'
+    - '`lgb.importance`'
+    - '`lgb.interprete`'
+    - '`lgb.plot.importance`'
+    - '`lgb.plot.interpretation`'
+  - title: Miscellaneous
+    desc: Ungroupable functions to troubleshoot LightGBM
+    contents:
+    - '`lgb.unloader`'
diff --git a/build_r.R b/build_r.R
index 3b5122fc352f..8a9924c75df7 100644
--- a/build_r.R
+++ b/build_r.R
@@ -56,7 +56,7 @@ result <- file.copy(from = "CMakeLists.txt",
                     overwrite = TRUE)
 .handle_result(result)
 
-# Build the package
+# Build the package (do not touch this line!)
 # NOTE: --keep-empty-dirs is necessary to keep the deep paths expected
 # by CMake while also meeting the CRAN req to create object files
 # on demand
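The "(do not touch this line!)" marker added to build_r.R is load-bearing: during the Read the Docs build (see the conf.py hunk below), sed truncates build_r.R right after that comment, so only the source-assembly steps run and the actual package build is skipped. In plain R, the truncation amounts to the sketch below; truncate_at_marker is a hypothetical helper shown only to illustrate the sed '/pattern/q' semantics and is not part of the repository:

    # Keep everything up to and including the marker line and drop the rest,
    # mirroring how sed's '/pattern/q' quits right after printing the match.
    truncate_at_marker <- function(path = "build_r.R",
                                   marker = "# Build the package (do not touch this line!)") {
      lines <- readLines(path)
      hit <- grep(marker, lines, fixed = TRUE)[1L]
      stopifnot(!is.na(hit))  # if the marker is edited away, fail loudly
      writeLines(lines[seq_len(hit)], path)
    }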
diff --git a/build_r_site.R b/build_r_site.R
new file mode 100644
index 000000000000..fd2f95bc7c7d
--- /dev/null
+++ b/build_r_site.R
@@ -0,0 +1,17 @@
+library(pkgdown)
+
+setwd("lightgbm_r")
+if (!dir.exists("docs")) {
+  dir.create("docs")
+}
+
+devtools::document()
+clean_site()
+init_site()
+build_home(preview = FALSE, quiet = FALSE)
+build_reference(lazy = FALSE,
+                document = FALSE,
+                examples = TRUE,
+                run_dont_run = FALSE,
+                seed = 42,
+                preview = FALSE)
diff --git a/docs/.linkcheckerrc b/docs/.linkcheckerrc
index 03204e2fb8cc..8b9bcdde9127 100644
--- a/docs/.linkcheckerrc
+++ b/docs/.linkcheckerrc
@@ -8,6 +8,7 @@ ignore=
   public.tableau.com
   https://www.open-mpi.org
   https://readthedocs.org
+  .*R/reference$
 ignorewarnings=http-robots-denied,https-certificate-error
 
 [output]
diff --git a/docs/R-API.rst b/docs/R-API.rst
new file mode 100644
index 000000000000..41cf6678a51f
--- /dev/null
+++ b/docs/R-API.rst
@@ -0,0 +1,4 @@
+R API
+=====
+
+Refer to `R reference <./R/reference>`__.
diff --git a/docs/README.rst b/docs/README.rst
index f4b11bca9f8f..b4a3e3f9e464 100644
--- a/docs/README.rst
+++ b/docs/README.rst
@@ -20,6 +20,9 @@ You can build the documentation locally. Just install Doxygen and run in ``docs`` folder
 
     pip install -r requirements.txt
     make html
 
+Unfortunately, documentation for R code is built only on our site, and the commands above will not build it for you locally.
+Consider using common R utilities for documentation generation if you need it.
+
 If you faced any problems with Doxygen installation or you simply do not need documentation for C code, it is possible to build the documentation without it:
 
diff --git a/docs/conf.py b/docs/conf.py
index 567ecf39b70a..338972b3c87c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -22,6 +22,7 @@
 import sys
 
 import sphinx
+from distutils.dir_util import copy_tree
 from docutils.parsers.rst import Directive
 from sphinx.errors import VersionRequirementError
 from subprocess import PIPE, Popen
@@ -56,6 +57,7 @@ def run(self):
 
 os.environ['LIGHTGBM_BUILD_DOC'] = '1'
 C_API = os.environ.get('C_API', '').lower().strip() != 'no'
+RTD = bool(os.environ.get('READTHEDOCS', ''))
 
 # If your documentation needs a minimal Sphinx version, state it here.
 needs_sphinx = '1.3'  # Due to sphinx.ext.napoleon
@@ -212,6 +214,52 @@ def generate_doxygen_xml(app):
         raise Exception("An error has occurred while executing Doxygen\n" + str(e))
 
 
+def generate_r_docs(app):
+    """Generate documentation for R-package.
+
+    Parameters
+    ----------
+    app : object
+        The application object representing the Sphinx process.
+    """
+    commands = """
+    echo 'options(repos = "https://cran.rstudio.com")' > $HOME/.Rprofile
+    /home/docs/.conda/bin/conda create -q -y -n r_env \
+        r-base=3.5.1=h1e0a451_2 \
+        r-devtools=1.13.6=r351h6115d3f_0 \
+        r-data.table=1.11.4=r351h96ca727_0 \
+        r-jsonlite=1.5=r351h96ca727_0 \
+        r-magrittr=1.5=r351h6115d3f_4 \
+        r-matrix=1.2_14=r351h96ca727_0 \
+        r-testthat=2.0.0=r351h29659fb_0 \
+        cmake=3.14.0=h52cb24c_0
+    /home/docs/.conda/bin/conda install -q -y -n r_env -c conda-forge \
+        r-pkgdown=1.3.0=r351h6115d3f_1000
+    source /home/docs/.conda/bin/activate r_env
+    export TAR=/bin/tar
+    cd {0}
+    sed -i'.bak' '/# Build the package (do not touch this line!)/q' build_r.R
+    Rscript build_r.R
+    Rscript build_r_site.R
+    """.format(os.path.join(CURR_PATH, os.path.pardir))
+    try:
+        # Warning! The following code can cause buffer overflows on RTD.
+        # Consider suppressing output completely if the RTD project silently fails.
+        # Refer to https://github.com/svenevs/exhale
+        # /blob/fe7644829057af622e467bb529db6c03a830da99/exhale/deploy.py#L99-L111
+        process = Popen(['/bin/bash'],
+                        stdin=PIPE, stdout=PIPE, stderr=PIPE,
+                        universal_newlines=True)
+        stdout, stderr = process.communicate(commands)
+        output = '\n'.join([i for i in (stdout, stderr) if i is not None])
+        if process.returncode != 0:
+            raise RuntimeError(output)
+        else:
+            print(output)
+    except BaseException as e:
+        raise Exception("An error has occurred while generating documentation for R-package\n" + str(e))
+
+
 def setup(app):
     """Add new elements at Sphinx initialization time.
 
@@ -220,9 +268,18 @@ def setup(app):
     app : object
         The application object representing the Sphinx process.
     """
+    first_run = not os.path.exists(os.path.join(CURR_PATH, '_FIRST_RUN.flag'))
+    if first_run and RTD:
+        open(os.path.join(CURR_PATH, '_FIRST_RUN.flag'), 'w').close()
     if C_API:
         app.connect("builder-inited", generate_doxygen_xml)
     else:
         app.add_directive('doxygenfile', IgnoredDirective)
+    if RTD:  # build R docs only on Read the Docs site
+        if first_run:
+            app.connect("builder-inited", generate_r_docs)
+        app.connect("build-finished",
+                    lambda app, exception: copy_tree(os.path.join(CURR_PATH, os.path.pardir, "lightgbm_r", "docs"),
+                                                     os.path.join(app.outdir, "R"), verbose=0))
     add_js_file = getattr(app, 'add_js_file', False) or app.add_javascript
     add_js_file("js/script.js")
diff --git a/docs/index.rst b/docs/index.rst
index 5ae58fc2b9f1..3a2574039bbd 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -29,6 +29,7 @@ For more details, please refer to `Features <./Features.rst>`__.
    Parameters Tuning
    C API
    Python API
+   R API
    Parallel Learning Guide
    GPU Tutorial
    Advanced Topics
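For a local preview of the R reference site, the granular calls in build_r_site.R can be collapsed into pkgdown's one-shot builder. A minimal sketch, assuming the package sources were already assembled into lightgbm_r/ by build_r.R and that devtools and pkgdown (around version 1.3) are installed:

    library(pkgdown)
    setwd("lightgbm_r")
    devtools::document()              # regenerate man/ pages from the roxygen comments
    build_site(run_dont_run = FALSE,  # skip the \dontrun{} blocks added above
               seed = 42,
               preview = FALSE)

Keeping run_dont_run = FALSE matters here: the \dontrun{} guards added to lgb.prepare(), lgb.prepare2(), and lgb.unloader() exist precisely so that lgb.unloader() does not detach the package while its own examples are being executed.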