[python][sklearn] remove verbose argument from fit() method (#4832)
StrikerRUS authored Nov 30, 2021
1 parent 67b4205 commit 4072e9f
Showing 5 changed files with 39 additions and 68 deletions.
6 changes: 3 additions & 3 deletions python-package/lightgbm/dask.py
@@ -1205,7 +1205,7 @@ def fit(
+ _base_doc[_base_doc.find('eval_metric :'):])

_base_doc = (_base_doc[:_base_doc.find('early_stopping_rounds :')]
-     + _base_doc[_base_doc.find('verbose :'):])
+     + _base_doc[_base_doc.find('feature_name :'):])

# DaskLGBMClassifier support for callbacks and init_model is not tested
fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
@@ -1388,7 +1388,7 @@ def fit(
+ _base_doc[_base_doc.find('eval_metric :'):])

_base_doc = (_base_doc[:_base_doc.find('early_stopping_rounds :')]
-     + _base_doc[_base_doc.find('verbose :'):])
+     + _base_doc[_base_doc.find('feature_name :'):])

# DaskLGBMRegressor support for callbacks and init_model is not tested
fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
@@ -1554,7 +1554,7 @@ def fit(
_base_doc = (_base_doc[:_base_doc.find('early_stopping_rounds :')]
+ "eval_at : iterable of int, optional (default=(1, 2, 3, 4, 5))\n"
+ f"{' ':8}The evaluation positions of the specified metric.\n"
-     + f"{' ':4}{_base_doc[_base_doc.find('verbose :'):]}")
+     + f"{' ':4}{_base_doc[_base_doc.find('feature_name :'):]}")

# DaskLGBMRanker support for callbacks and init_model is not tested
fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
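The dask.py hunks above rebuild each Dask estimator's fit() docstring by slicing an inherited template between parameter headers; with verbose gone, the slice now runs straight to 'feature_name :'. A minimal, self-contained sketch of that slicing pattern (the shortened template below is invented for illustration, not LightGBM's actual docstring):

# Hypothetical shortened docstring template, for illustration only.
_base_doc = (
    "eval_metric : str\n"
    "    Metric(s) to evaluate on the eval set.\n"
    "early_stopping_rounds : int\n"
    "    Deprecated in the scikit-learn API.\n"
    "verbose : bool\n"
    "    Deprecated in the scikit-learn API.\n"
    "feature_name : list of str\n"
    "    Feature names.\n"
)

# Drop everything from 'early_stopping_rounds :' up to 'feature_name :',
# which removes both the early_stopping_rounds and verbose blocks.
_base_doc = (_base_doc[:_base_doc.find('early_stopping_rounds :')]
             + _base_doc[_base_doc.find('feature_name :'):])

print(_base_doc)  # only the eval_metric and feature_name blocks remain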
35 changes: 7 additions & 28 deletions python-package/lightgbm/sklearn.py
@@ -258,17 +258,6 @@ def __call__(self, preds, dataset):
If there's more than one, will check all of them. But the training data is ignored anyway.
To check only the first metric, set the ``first_metric_only`` parameter to ``True``
in additional parameters ``**kwargs`` of the model constructor.
- verbose : bool or int, optional (default=True)
-     Requires at least one evaluation data.
-     If True, the eval metric on the eval set is printed at each boosting stage.
-     If int, the eval metric on the eval set is printed at every ``verbose`` boosting stage.
-     The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.
-     .. rubric:: Example
-     With ``verbose`` = 4 and at least one item in ``eval_set``,
-     an evaluation metric is printed every 4 (instead of 1) boosting stages.
feature_name : list of str, or 'auto', optional (default='auto')
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
@@ -597,7 +586,7 @@ def fit(self, X, y,
sample_weight=None, init_score=None, group=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_class_weight=None, eval_init_score=None, eval_group=None,
- eval_metric=None, early_stopping_rounds=None, verbose='warn',
+ eval_metric=None, early_stopping_rounds=None,
feature_name='auto', categorical_feature='auto',
callbacks=None, init_model=None):
"""Docstring is set after definition, using a template."""
@@ -752,16 +741,6 @@ def _get_meta_data(collection, name, i):
else:
callbacks = copy.copy(callbacks) # don't use deepcopy here to allow non-serializable objects

- if verbose != 'warn':
-     _log_warning("'verbose' argument is deprecated and will be removed in a future release of LightGBM. "
-                  "Pass 'log_evaluation()' callback via 'callbacks' argument instead.")
- else:
-     if callbacks:  # assume user has already specified log_evaluation callback
-         verbose = False
-     else:
-         verbose = True
- callbacks.append(log_evaluation(int(verbose)))

evals_result = {}
callbacks.append(record_evaluation(evals_result))

@@ -931,13 +910,13 @@ def fit(self, X, y,
sample_weight=None, init_score=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_init_score=None, eval_metric=None, early_stopping_rounds=None,
- verbose='warn', feature_name='auto', categorical_feature='auto',
+ feature_name='auto', categorical_feature='auto',
callbacks=None, init_model=None):
"""Docstring is inherited from the LGBMModel."""
super().fit(X, y, sample_weight=sample_weight, init_score=init_score,
eval_set=eval_set, eval_names=eval_names, eval_sample_weight=eval_sample_weight,
eval_init_score=eval_init_score, eval_metric=eval_metric,
- early_stopping_rounds=early_stopping_rounds, verbose=verbose, feature_name=feature_name,
+ early_stopping_rounds=early_stopping_rounds, feature_name=feature_name,
categorical_feature=categorical_feature, callbacks=callbacks, init_model=init_model)
return self

@@ -957,7 +936,7 @@ def fit(self, X, y,
sample_weight=None, init_score=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_class_weight=None, eval_init_score=None, eval_metric=None,
- early_stopping_rounds=None, verbose='warn',
+ early_stopping_rounds=None,
feature_name='auto', categorical_feature='auto',
callbacks=None, init_model=None):
"""Docstring is inherited from the LGBMModel."""
@@ -1004,7 +983,7 @@ def fit(self, X, y,
eval_names=eval_names, eval_sample_weight=eval_sample_weight,
eval_class_weight=eval_class_weight, eval_init_score=eval_init_score,
eval_metric=eval_metric, early_stopping_rounds=early_stopping_rounds,
- verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature,
+ feature_name=feature_name, categorical_feature=categorical_feature,
callbacks=callbacks, init_model=init_model)
return self

@@ -1079,7 +1058,7 @@ def fit(self, X, y,
sample_weight=None, init_score=None, group=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_init_score=None, eval_group=None, eval_metric=None,
- eval_at=(1, 2, 3, 4, 5), early_stopping_rounds=None, verbose='warn',
+ eval_at=(1, 2, 3, 4, 5), early_stopping_rounds=None,
feature_name='auto', categorical_feature='auto',
callbacks=None, init_model=None):
"""Docstring is inherited from the LGBMModel."""
@@ -1103,7 +1082,7 @@ def fit(self, X, y,
super().fit(X, y, sample_weight=sample_weight, init_score=init_score, group=group,
eval_set=eval_set, eval_names=eval_names, eval_sample_weight=eval_sample_weight,
eval_init_score=eval_init_score, eval_group=eval_group, eval_metric=eval_metric,
- early_stopping_rounds=early_stopping_rounds, verbose=verbose, feature_name=feature_name,
+ early_stopping_rounds=early_stopping_rounds, feature_name=feature_name,
categorical_feature=categorical_feature, callbacks=callbacks, init_model=init_model)
return self

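The deprecation warning removed above pointed users at the log_evaluation() callback as the replacement for the fit()-level verbose flag. A minimal sketch of that migration with the scikit-learn interface (dataset and hyperparameters chosen only for illustration):

import lightgbm as lgb
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3)

# Before this commit: gbm.fit(..., verbose=4) printed the eval metric every 4 rounds.
# After it: pass the logging callback explicitly instead.
gbm.fit(
    X_train, y_train,
    eval_set=[(X_test, y_test)],
    callbacks=[lgb.log_evaluation(period=4)],
)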
3 changes: 1 addition & 2 deletions tests/python_package_test/test_dask.py
@@ -900,8 +900,7 @@ def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix,
'eval_names': eval_names,
'eval_sample_weight': eval_sample_weight,
'eval_init_score': eval_init_score,
- 'eval_metric': eval_metrics,
- 'verbose': True
+ 'eval_metric': eval_metrics
}
if task == 'ranking':
fit_params.update(
6 changes: 3 additions & 3 deletions tests/python_package_test/test_plotting.py
@@ -143,7 +143,7 @@ def test_plot_split_value_histogram(params, breast_cancer_split, train_data):
def test_plot_tree(breast_cancer_split):
X_train, _, y_train, _ = breast_cancer_split
gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1)
- gbm.fit(X_train, y_train, verbose=False)
+ gbm.fit(X_train, y_train)

with pytest.raises(IndexError):
lgb.plot_tree(gbm, tree_index=83)
@@ -161,7 +161,7 @@ def test_create_tree_digraph(breast_cancer_split):

constraints = [-1, 1] * int(X_train.shape[1] / 2)
gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1, monotone_constraints=constraints)
- gbm.fit(X_train, y_train, verbose=False)
+ gbm.fit(X_train, y_train)

with pytest.raises(IndexError):
lgb.create_tree_digraph(gbm, tree_index=83)
@@ -265,7 +265,7 @@ def test_plot_metrics(params, breast_cancer_split, train_data):
lgb.plot_metric(evals_result1)

gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1)
- gbm2.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False)
+ gbm2.fit(X_train, y_train, eval_set=[(X_test, y_test)])
ax4 = lgb.plot_metric(gbm2, title=None, xlabel=None, ylabel=None)
assert isinstance(ax4, matplotlib.axes.Axes)
assert ax4.get_title() == ''