[python] rename print_evaluation() into log_evaluation() #4604

Merged: 8 commits, Sep 15, 2021
Changes from all commits

docs/Python-API.rst (1 addition, 1 deletion)

@@ -53,7 +53,7 @@ Callbacks
     :toctree: pythonapi/

     early_stopping
-    print_evaluation
+    log_evaluation
     record_evaluation
     reset_parameter


python-package/lightgbm/__init__.py (2 additions, 2 deletions)

@@ -6,7 +6,7 @@
 from pathlib import Path

 from .basic import Booster, Dataset, Sequence, register_logger
-from .callback import early_stopping, print_evaluation, record_evaluation, reset_parameter
+from .callback import early_stopping, log_evaluation, print_evaluation, record_evaluation, reset_parameter
 from .engine import CVBooster, cv, train

 try:
@@ -32,5 +32,5 @@
            'train', 'cv',
            'LGBMModel', 'LGBMRegressor', 'LGBMClassifier', 'LGBMRanker',
            'DaskLGBMRegressor', 'DaskLGBMClassifier', 'DaskLGBMRanker',
-           'print_evaluation', 'record_evaluation', 'reset_parameter', 'early_stopping',
+           'log_evaluation', 'print_evaluation', 'record_evaluation', 'reset_parameter', 'early_stopping',
            'plot_importance', 'plot_split_value_histogram', 'plot_metric', 'plot_tree', 'create_tree_digraph']
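
Since both names are exported from the package root, existing imports keep working during the deprecation window. A minimal sketch, not part of this PR, assuming a LightGBM build that includes this change:

# Both callbacks are importable from the package root.
from lightgbm import log_evaluation, print_evaluation

cb = log_evaluation(period=10)           # new name
legacy_cb = print_evaluation(period=10)  # warns, then delegates to log_evaluation()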

python-package/lightgbm/callback.py (10 additions, 0 deletions)

@@ -52,6 +52,16 @@ def _format_eval_result(value: list, show_stdv: bool = True) -> str:
 def print_evaluation(period: int = 1, show_stdv: bool = True) -> Callable:
     """Create a callback that logs the evaluation results.

+    Deprecated, use ``log_evaluation()`` instead.
+    """
+    _log_warning("'print_evaluation()' callback is deprecated and will be removed in a future release of LightGBM. "
+                 "Use 'log_evaluation()' callback instead.")
+    return log_evaluation(period=period, show_stdv=show_stdv)
+
+
+def log_evaluation(period: int = 1, show_stdv: bool = True) -> Callable:
+    """Create a callback that logs the evaluation results.
+
     By default, standard output resource is used.
     Use ``register_logger()`` function to register a custom logger.
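
The renamed callback is passed through the callbacks argument of train(). A minimal usage sketch with synthetic data (hypothetical values, not from this PR):

import numpy as np
import lightgbm as lgb

X = np.random.rand(100, 5)
y = np.random.randint(0, 2, 100)
train_set = lgb.Dataset(X, label=y)

# Log evaluation results every 5 rounds through the renamed callback.
booster = lgb.train(
    {'objective': 'binary', 'metric': 'auc', 'verbosity': -1},
    train_set,
    num_boost_round=20,
    valid_sets=[train_set],
    callbacks=[lgb.log_evaluation(period=5)],
)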

python-package/lightgbm/engine.py (7 additions, 7 deletions)

@@ -238,16 +238,16 @@ def train(
     # Most of legacy advanced options becomes callbacks
     if verbose_eval != "warn":
         _log_warning("'verbose_eval' argument is deprecated and will be removed in a future release of LightGBM. "
-                     "Pass 'print_evaluation()' callback via 'callbacks' argument instead.")
+                     "Pass 'log_evaluation()' callback via 'callbacks' argument instead.")
     else:
-        if callbacks:  # assume user has already specified print_evaluation callback
+        if callbacks:  # assume user has already specified log_evaluation callback
             verbose_eval = False
         else:
             verbose_eval = True
     if verbose_eval is True:
-        callbacks.add(callback.print_evaluation())
+        callbacks.add(callback.log_evaluation())
     elif isinstance(verbose_eval, int):
-        callbacks.add(callback.print_evaluation(verbose_eval))
+        callbacks.add(callback.log_evaluation(verbose_eval))

     if early_stopping_rounds is not None and early_stopping_rounds > 0:
         callbacks.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=bool(verbose_eval)))
@@ -619,11 +619,11 @@ def cv(params, train_set, num_boost_round=100,
         callbacks.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=False))
     if verbose_eval is not None:
         _log_warning("'verbose_eval' argument is deprecated and will be removed in a future release of LightGBM. "
-                     "Pass 'print_evaluation()' callback via 'callbacks' argument instead.")
+                     "Pass 'log_evaluation()' callback via 'callbacks' argument instead.")
     if verbose_eval is True:
-        callbacks.add(callback.print_evaluation(show_stdv=show_stdv))
+        callbacks.add(callback.log_evaluation(show_stdv=show_stdv))
     elif isinstance(verbose_eval, int):
-        callbacks.add(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))
+        callbacks.add(callback.log_evaluation(verbose_eval, show_stdv=show_stdv))

     callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
     callbacks_after_iter = callbacks - callbacks_before_iter
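
The updated warning points at the replacement pattern: verbose_eval=N becomes an explicit log_evaluation(period=N) callback. A before/after sketch for cv() with synthetic data (hypothetical values, not from this PR):

import numpy as np
import lightgbm as lgb

params = {'objective': 'binary', 'metric': 'binary_logloss', 'verbosity': -1}
data = lgb.Dataset(np.random.rand(200, 4), label=np.random.randint(0, 2, 200))

# Before (deprecated): lgb.cv(params, data, verbose_eval=10, show_stdv=True)
# After: pass the callback explicitly.
cv_results = lgb.cv(
    params,
    data,
    num_boost_round=30,
    nfold=3,
    callbacks=[lgb.log_evaluation(period=10, show_stdv=True)],
)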

python-package/lightgbm/sklearn.py (4 additions, 4 deletions)

@@ -7,7 +7,7 @@
 import numpy as np

 from .basic import Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _log_warning
-from .callback import print_evaluation, record_evaluation
+from .callback import log_evaluation, record_evaluation
 from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray,
                      _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase,
                      _LGBMComputeSampleWeight, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase, dt_DataTable,
@@ -731,13 +731,13 @@ def _get_meta_data(collection, name, i):

         if verbose != 'warn':
             _log_warning("'verbose' argument is deprecated and will be removed in a future release of LightGBM. "
-                         "Pass 'print_evaluation()' callback via 'callbacks' argument instead.")
+                         "Pass 'log_evaluation()' callback via 'callbacks' argument instead.")
         else:
-            if callbacks:  # assume user has already specified print_evaluation callback
+            if callbacks:  # assume user has already specified log_evaluation callback
                 verbose = False
             else:
                 verbose = True
-        callbacks.append(print_evaluation(int(verbose)))
+        callbacks.append(log_evaluation(int(verbose)))

         evals_result = {}
         callbacks.append(record_evaluation(evals_result))
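
The scikit-learn interface follows the same migration: the deprecated verbose argument of fit() maps onto an explicit log_evaluation callback. A sketch with synthetic data (hypothetical values, not from this PR):

import numpy as np
import lightgbm as lgb

X = np.random.rand(150, 6)
y = np.random.randint(0, 2, 150)

clf = lgb.LGBMClassifier(n_estimators=20)
# Before (deprecated): clf.fit(X, y, eval_set=[(X, y)], verbose=5)
# After: pass the callback explicitly.
clf.fit(X, y, eval_set=[(X, y)], callbacks=[lgb.log_evaluation(period=5)])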

tests/python_package_test/test_utilities.py (1 addition, 1 deletion)

@@ -33,7 +33,7 @@ def dummy_metric(_, __):
     eval_records = {}
     callbacks = [
         lgb.record_evaluation(eval_records),
-        lgb.print_evaluation(2),
+        lgb.log_evaluation(2),
         lgb.early_stopping(4)
     ]
     lgb.train({'objective': 'binary', 'metric': ['auc', 'binary_error']},
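
Because log_evaluation() writes through LightGBM's logging layer rather than plain print(), its output can be redirected with register_logger(), as the new docstring notes. A hedged sketch (synthetic data, hypothetical logger name):

import logging

import numpy as np
import lightgbm as lgb

# Route LightGBM output, including log_evaluation messages, to a custom logger.
logging.basicConfig(level=logging.INFO)
lgb.register_logger(logging.getLogger('lgb_eval'))

train_set = lgb.Dataset(np.random.rand(100, 3), label=np.random.randint(0, 2, 100))
lgb.train(
    {'objective': 'binary', 'verbosity': -1},
    train_set,
    num_boost_round=10,
    valid_sets=[train_set],
    callbacks=[lgb.log_evaluation(period=2)],
)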