Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[python][scikit-learn] new stacking tests and make number of features a property #3173

Merged
merged 19 commits into from
Jun 27, 2020
Merged
Show file tree
Hide file tree
Changes from 12 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 10 additions & 7 deletions python-package/lightgbm/sklearn.py
Original file line number Diff line number Diff line change
Expand Up @@ -253,11 +253,6 @@ def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1,

\*\*kwargs is not supported in sklearn, it may cause unexpected issues.

Attributes
----------
n_features_in_ : int
The number of features of fitted model.

Note
----
A custom objective function can be provided for the ``objective`` parameter.
Expand Down Expand Up @@ -313,6 +308,7 @@ def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1,
self._class_weight = None
self._class_map = None
self._n_features = None
self._n_features_in = None
self._classes = None
self._n_classes = None
self.set_params(**kwargs)
Expand Down Expand Up @@ -545,8 +541,8 @@ def fit(self, X, y,
sample_weight = np.multiply(sample_weight, class_sample_weight)

self._n_features = _X.shape[1]
# set public attribute for consistency
self.n_features_in_ = self._n_features
# copy for consistency
self._n_features_in = self._n_features

def _construct_dataset(X, y, sample_weight, init_score, group, params,
categorical_feature='auto'):
Expand Down Expand Up @@ -675,6 +671,13 @@ def n_features_(self):
raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
return self._n_features

@property
def n_features_in_(self):
    """:obj:`int`: The number of features of fitted model."""
    # Mirrors the private attribute set in fit(); unfitted estimators
    # must raise so sklearn's check_is_fitted-style probes work.
    value = self._n_features_in
    if value is not None:
        return value
    raise LGBMNotFittedError('No n_features_in found. Need to call fit beforehand.')

@property
def best_score_(self):
""":obj:`dict` or :obj:`None`: The best score of fitted model."""
Expand Down
44 changes: 44 additions & 0 deletions tests/python_package_test/test_sklearn.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,50 @@ def test_dart(self):
self.assertGreaterEqual(score, 0.8)
self.assertLessEqual(score, 1.)

# sklearn < 0.22 does not have a stacking classifier
@unittest.skipIf(sk_version < '0.22.0', 'scikit-learn version is less than 0.22')
def test_stacking_classifier(self):
    """Check LGBMClassifier works as both base and final estimator of a StackingClassifier."""
    from sklearn.ensemble import StackingClassifier

    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    classifiers = [('gbm1', lgb.LGBMClassifier()),
                   ('gbm2', lgb.LGBMClassifier())]
    clf = StackingClassifier(estimators=classifiers,
                             final_estimator=lgb.LGBMClassifier())
    # keep the score instead of discarding it, so accuracy is actually checked
    score = clf.fit(X_train, y_train).score(X_test, y_test)
    self.assertGreaterEqual(score, 0.8)  # same threshold as the other accuracy tests
    self.assertEqual(clf.n_features_in_, 4)  # test number of input features

# sklearn < 0.22 does not have a stacking regressor
@unittest.skipIf(sk_version < '0.22.0', 'scikit-learn version is less than 0.22')
def test_stacking_regressor(self):
    """Check LGBMRegressor works as both base and final estimator of a StackingRegressor."""
    from sklearn.ensemble import StackingRegressor

    X, y = load_boston(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    regressors = [('gbm1', lgb.LGBMRegressor()),
                  ('gbm2', lgb.LGBMRegressor())]
    reg = StackingRegressor(estimators=regressors,
                            final_estimator=lgb.LGBMRegressor())
    # keep the R^2 score instead of discarding it, so fit quality is actually checked
    score = reg.fit(X_train, y_train).score(X_test, y_test)
    self.assertGreaterEqual(score, 0.2)  # loose bound; only guards against degenerate fits
    self.assertEqual(reg.n_features_in_, 13)  # test number of input features

# sklearn < 0.23 does not have n_features_in_
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_n_features_in(self):
    """Run sklearn's n_features_in_ compatibility check on both estimator types."""
    from sklearn.utils.estimator_checks import check_n_features_in

    # check_n_features_in generates its own fixture data, so no dataset
    # loading or train/test splitting is needed here.
    for estimator in (lgb.LGBMClassifier(), lgb.LGBMRegressor()):
        check_n_features_in(estimator.__class__.__name__, estimator)

def test_grid_search(self):
X, y = load_iris(True)
y = np.array(list(map(str, y)))  # utilize label encoder at its max power
Expand Down