
Commit
remove evals_result argument from train() function
StrikerRUS committed Dec 12, 2021
1 parent 9f13a9c commit a05bc4c
Showing 6 changed files with 349 additions and 150 deletions.
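In practice this means code that previously collected metrics through the evals_result keyword argument now passes lgb.record_evaluation() via the callbacks list. A minimal before/after sketch of the migration, distilled from the diffs below (params, lgb_train, and lgb_test are placeholders for an already-prepared parameter dict and Datasets):

import lightgbm as lgb

evals_result = {}  # filled in by the callback during training

# Before this commit: results were captured via the keyword argument.
# gbm = lgb.train(params, lgb_train,
#                 valid_sets=[lgb_train, lgb_test],
#                 evals_result=evals_result)

# After this commit: pass the record_evaluation() callback instead.
gbm = lgb.train(
    params,
    lgb_train,
    valid_sets=[lgb_train, lgb_test],
    callbacks=[lgb.record_evaluation(evals_result)]
)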
@@ -148,8 +148,10 @@
" valid_sets=[lgb_train, lgb_test],\n",
" feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],\n",
" categorical_feature=[21],\n",
" evals_result=evals_result,\n",
" callbacks=[lgb.log_evaluation(10)])"
" callbacks=[\n",
" lgb.log_evaluation(10),\n",
" lgb.record_evaluation(evals_result)\n",
" ])"
]
},
{
20 changes: 12 additions & 8 deletions examples/python-guide/plot_example.py
@@ -36,14 +36,18 @@

 print('Starting training...')
 # train
-gbm = lgb.train(params,
-                lgb_train,
-                num_boost_round=100,
-                valid_sets=[lgb_train, lgb_test],
-                feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],
-                categorical_feature=[21],
-                evals_result=evals_result,
-                callbacks=[lgb.log_evaluation(10)])
+gbm = lgb.train(
+    params,
+    lgb_train,
+    num_boost_round=100,
+    valid_sets=[lgb_train, lgb_test],
+    feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],
+    categorical_feature=[21],
+    callbacks=[
+        lgb.log_evaluation(10),
+        lgb.record_evaluation(evals_result)
+    ]
+)

 print('Plotting metrics recorded during training...')
 ax = lgb.plot_metric(evals_result, metric='l1')
10 changes: 8 additions & 2 deletions python-package/lightgbm/callback.py
@@ -128,18 +128,24 @@ def record_evaluation(eval_result: Dict[str, Dict[str, List[Any]]]) -> Callable:
"""
if not isinstance(eval_result, dict):
raise TypeError('eval_result should be a dictionary')
eval_result.clear()
inited = False

def _init(env: CallbackEnv) -> None:
nonlocal inited
eval_result.clear()
for data_name, eval_name, _, _ in env.evaluation_result_list:
eval_result.setdefault(data_name, collections.OrderedDict())
eval_result[data_name].setdefault(eval_name, [])
inited = True

def _callback(env: CallbackEnv) -> None:
if not eval_result:
nonlocal inited
if not inited:
_init(env)
for data_name, eval_name, result, _ in env.evaluation_result_list:
eval_result[data_name][eval_name].append(result)
if env.iteration == env.end_iteration - 1:
inited = False
_callback.order = 20 # type: ignore
return _callback

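Note what the new inited flag buys over the old "if not eval_result" emptiness check: eval_result is now cleared inside _init() rather than once at callback creation, and the flag is reset on the final iteration, so a single record_evaluation() callback can be reused across several train() calls without stale entries. A sketch of that behavior, assuming params, lgb_train, and lgb_test are already defined:

import lightgbm as lgb

evals_result = {}
recorder = lgb.record_evaluation(evals_result)

lgb.train(params, lgb_train, num_boost_round=10,
          valid_sets=[lgb_test], callbacks=[recorder])
# Copy the results before reusing the callback; the next run clears the dict.
first_run = {name: dict(metrics) for name, metrics in evals_result.items()}

lgb.train(params, lgb_train, num_boost_round=10,
          valid_sets=[lgb_test], callbacks=[recorder])
# evals_result now holds only the second run's metrics.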
19 changes: 0 additions & 19 deletions python-package/lightgbm/engine.py
@@ -34,7 +34,6 @@ def train(
     feature_name: Union[List[str], str] = 'auto',
     categorical_feature: Union[List[str], List[int], str] = 'auto',
     early_stopping_rounds: Optional[int] = None,
-    evals_result: Optional[Dict[str, Any]] = None,
     keep_training_booster: bool = False,
     callbacks: Optional[List[Callable]] = None
 ) -> Booster:
@@ -119,19 +118,6 @@
         To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``.
         The index of iteration that has the best performance will be saved in the ``best_iteration`` field
         if early stopping logic is enabled by setting ``early_stopping_rounds``.
-    evals_result : dict or None, optional (default=None)
-        Dictionary used to store all evaluation results of all the items in ``valid_sets``.
-        This should be initialized outside of your call to ``train()`` and should be empty.
-        Any initial contents of the dictionary will be deleted.
-
-        .. rubric:: Example
-
-        With a ``valid_sets`` = [valid_set, train_set],
-        ``valid_names`` = ['eval', 'train']
-        and a ``params`` = {'metric': 'logloss'}
-        returns {'train': {'logloss': ['0.48253', '0.35953', ...]},
-        'eval': {'logloss': ['0.480385', '0.357756', ...]}}.
-
     keep_training_booster : bool, optional (default=False)
         Whether the returned Booster will be used to keep training.
         If False, the returned value will be converted into _InnerPredictor before returning.
@@ -221,11 +207,6 @@
     if early_stopping_rounds is not None and early_stopping_rounds > 0:
         callbacks_set.add(callback.early_stopping(early_stopping_rounds, first_metric_only))

-    if evals_result is not None:
-        _log_warning("'evals_result' argument is deprecated and will be removed in a future release of LightGBM. "
-                     "Pass 'record_evaluation()' callback via 'callbacks' argument instead.")
-        callbacks_set.add(callback.record_evaluation(evals_result))
-
     callbacks_before_iter_set = {cb for cb in callbacks_set if getattr(cb, 'before_iteration', False)}
     callbacks_after_iter_set = callbacks_set - callbacks_before_iter_set
     callbacks_before_iter = sorted(callbacks_before_iter_set, key=attrgetter('order'))
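The dict filled by record_evaluation() keeps the nested shape the deleted docstring described: one entry per validation-set name, each mapping a metric name to its per-iteration values. A short sketch of reading it back, reusing the docstring's example names and assuming params sets metric to 'logloss':

import lightgbm as lgb

evals_result = {}
gbm = lgb.train(params, train_set,
                valid_sets=[valid_set, train_set],
                valid_names=['eval', 'train'],
                callbacks=[lgb.record_evaluation(evals_result)])

# e.g. {'train': {'logloss': [...]}, 'eval': {'logloss': [...]}}
print(evals_result['eval']['logloss'][-1])  # final logloss on the 'eval' set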