Skip to content

Commit

Permalink
[python] remove verbose_eval argument of train() and cv() functions (#4878)
Browse files Browse the repository at this point in the history

* remove `verbose_eval` argument

* update example Notebook

Loading branch information
StrikerRUS authored Dec 12, 2021
1 parent 8066261 commit 9f13a9c
Show file tree
Hide file tree
Showing 5 changed files with 25 additions and 94 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@
" feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],\n",
" categorical_feature=[21],\n",
" evals_result=evals_result,\n",
" verbose_eval=10)"
" callbacks=[lgb.log_evaluation(10)])"
]
},
{
Expand Down
2 changes: 1 addition & 1 deletion examples/python-guide/plot_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@
feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],
categorical_feature=[21],
evals_result=evals_result,
verbose_eval=10)
callbacks=[lgb.log_evaluation(10)])

print('Plotting metrics recorded during training...')
ax = lgb.plot_metric(evals_result, metric='l1')
Expand Down
44 changes: 2 additions & 42 deletions python-package/lightgbm/engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@ def train(
categorical_feature: Union[List[str], List[int], str] = 'auto',
early_stopping_rounds: Optional[int] = None,
evals_result: Optional[Dict[str, Any]] = None,
verbose_eval: Union[bool, int, str] = 'warn',
keep_training_booster: bool = False,
callbacks: Optional[List[Callable]] = None
) -> Booster:
Expand Down Expand Up @@ -133,17 +132,6 @@ def train(
returns {'train': {'logloss': ['0.48253', '0.35953', ...]},
'eval': {'logloss': ['0.480385', '0.357756', ...]}}.
verbose_eval : bool or int, optional (default=True)
Requires at least one validation data.
If True, the eval metric on the valid set is printed at each boosting stage.
If int, the eval metric on the valid set is printed at every ``verbose_eval`` boosting stage.
The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.
.. rubric:: Example
With ``verbose_eval`` = 4 and at least one item in ``valid_sets``,
an evaluation metric is printed every 4 (instead of 1) boosting stages.
keep_training_booster : bool, optional (default=False)
Whether the returned Booster will be used to keep training.
If False, the returned value will be converted into _InnerPredictor before returning.
Expand Down Expand Up @@ -230,21 +218,8 @@ def train(
callbacks_set = set(callbacks)

# Most of legacy advanced options becomes callbacks
if verbose_eval != "warn":
_log_warning("'verbose_eval' argument is deprecated and will be removed in a future release of LightGBM. "
"Pass 'log_evaluation()' callback via 'callbacks' argument instead.")
else:
if callbacks_set: # assume user has already specified log_evaluation callback
verbose_eval = False
else:
verbose_eval = True
if verbose_eval is True:
callbacks_set.add(callback.log_evaluation())
elif isinstance(verbose_eval, int):
callbacks_set.add(callback.log_evaluation(verbose_eval))

if early_stopping_rounds is not None and early_stopping_rounds > 0:
callbacks_set.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=bool(verbose_eval)))
callbacks_set.add(callback.early_stopping(early_stopping_rounds, first_metric_only))

if evals_result is not None:
_log_warning("'evals_result' argument is deprecated and will be removed in a future release of LightGBM. "
Expand Down Expand Up @@ -426,8 +401,7 @@ def cv(params, train_set, num_boost_round=100,
metrics=None, fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
early_stopping_rounds=None, fpreproc=None,
verbose_eval=None, show_stdv=True, seed=0,
callbacks=None, eval_train_metric=False,
seed=0, callbacks=None, eval_train_metric=False,
return_cvbooster=False):
"""Perform the cross-validation with given parameters.
Expand Down Expand Up @@ -522,13 +496,6 @@ def cv(params, train_set, num_boost_round=100,
fpreproc : callable or None, optional (default=None)
Preprocessing function that takes (dtrain, dtest, params)
and returns transformed versions of those.
verbose_eval : bool, int, or None, optional (default=None)
Whether to display the progress.
If True, progress will be displayed at every boosting stage.
If int, progress will be displayed at every given ``verbose_eval`` boosting stage.
show_stdv : bool, optional (default=True)
Whether to display the standard deviation in progress.
Results are not affected by this parameter, and always contain std.
seed : int, optional (default=0)
Seed used to generate the folds (passed to numpy.random.seed).
callbacks : list of callable, or None, optional (default=None)
Expand Down Expand Up @@ -606,13 +573,6 @@ def cv(params, train_set, num_boost_round=100,
callbacks = set(callbacks)
if early_stopping_rounds is not None and early_stopping_rounds > 0:
callbacks.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=False))
if verbose_eval is not None:
_log_warning("'verbose_eval' argument is deprecated and will be removed in a future release of LightGBM. "
"Pass 'log_evaluation()' callback via 'callbacks' argument instead.")
if verbose_eval is True:
callbacks.add(callback.log_evaluation(show_stdv=show_stdv))
elif isinstance(verbose_eval, int):
callbacks.add(callback.log_evaluation(verbose_eval, show_stdv=show_stdv))

callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
callbacks_after_iter = callbacks - callbacks_before_iter
Expand Down
Loading

0 comments on commit 9f13a9c

Please sign in to comment.