diff --git a/python-package/lightgbm/sklearn.py b/python-package/lightgbm/sklearn.py
index ff65bf657c5c..796bde3717aa 100644
--- a/python-package/lightgbm/sklearn.py
+++ b/python-package/lightgbm/sklearn.py
@@ -840,7 +840,6 @@ def _process_params(self, stage: str) -> Dict[str, Any]:
             params["random_state"] = params["random_state"].randint(np.iinfo(np.int32).max)
         elif isinstance(params["random_state"], np.random.Generator):
             params["random_state"] = int(params["random_state"].integers(np.iinfo(np.int32).max))
-
         if self._n_classes > 2:
             for alias in _ConfigAliases.get("num_class"):
                 params.pop(alias, None)
diff --git a/tests/python_package_test/test_sklearn.py b/tests/python_package_test/test_sklearn.py
index cfbb0e3d9f68..03ac406d4fec 100644
--- a/tests/python_package_test/test_sklearn.py
+++ b/tests/python_package_test/test_sklearn.py
@@ -356,7 +356,7 @@ def test_early_stopping_is_deactivated_by_default_lambdarank():
     q_train = np.loadtxt(str(rank_example_dir / "rank.train.query"))
     n_estimators = 5
     gbm = lgb.LGBMRanker(n_estimators=n_estimators, random_state=42, verbose=-1)
-    gbm.fit(X_train, y_train, group=q_train)  # Assuming 10 samples in one group
+    gbm.fit(X_train, y_train, group=q_train)
 
     # Check that early stopping did not kick in
     assert gbm._Booster.params.get("early_stopping_round") is None
@@ -377,7 +377,6 @@ def test_auto_early_stopping_lambdarank():
         y_train,
         group=q_train,
         eval_at=[1, 3],
-        callbacks=[lgb.reset_parameter(learning_rate=lambda x: max(0.01, 0.1 - 0.01 * x))],
     )
     assert gbm._Booster.params["early_stopping_round"] == 10
     assert gbm._Booster.num_trees() < n_estimators
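
For context, here is a minimal sketch of the default-behavior check the updated test exercises: fitting an `LGBMRanker` without any validation set should leave early stopping disabled. The `rank_example_dir` path and the use of `load_svmlight_file` for `X_train`/`y_train` are assumptions (only the query-file load and the `LGBMRanker` calls appear in the diff above).

```python
from pathlib import Path

import numpy as np
from sklearn.datasets import load_svmlight_file

import lightgbm as lgb

# Assumed location of LightGBM's LambdaRank example data.
rank_example_dir = Path("examples/lambdarank")
X_train, y_train = load_svmlight_file(str(rank_example_dir / "rank.train"))
q_train = np.loadtxt(str(rank_example_dir / "rank.train.query"))

# Fit without eval_set: early stopping should stay deactivated, so the
# underlying Booster records no early_stopping_round parameter.
gbm = lgb.LGBMRanker(n_estimators=5, random_state=42, verbose=-1)
gbm.fit(X_train, y_train, group=q_train)
assert gbm._Booster.params.get("early_stopping_round") is None
```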