Skip to content

Commit

Permalink
Future warnings (#923)
Browse files Browse the repository at this point in the history
Address sklearn future warnings
Address yaml.load warning
Loading branch information
milesgranger authored Feb 6, 2020
1 parent 0a49881 commit 1fd4e0b
Show file tree
Hide file tree
Showing 7 changed files with 22 additions and 22 deletions.
8 changes: 4 additions & 4 deletions examples/Gordo-Workflow-High-Level.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -67,9 +67,9 @@
" base_estimator:\n",
" sklearn.pipeline.Pipeline:\n",
" steps:\n",
" - sklearn.decomposition.pca.PCA\n",
" - sklearn.decomposition.PCA\n",
" - sklearn.multioutput.MultiOutputRegressor:\n",
" estimator: sklearn.linear_model.base.LinearRegression\n",
" estimator: sklearn.linear_model.LinearRegression\n",
" name: crazy-sweet-name\n",
"\"\"\""
]
Expand Down Expand Up @@ -453,9 +453,9 @@
" base_estimator:\n",
" sklearn.pipeline.Pipeline:\n",
" steps:\n",
" - sklearn.decomposition.pca.PCA\n",
" - sklearn.decomposition.PCA\n",
" - sklearn.multioutput.MultiOutputRegressor:\n",
" estimator: sklearn.linear_model.base.LinearRegression\n",
" estimator: sklearn.linear_model.LinearRegression\n",
" model-creation-date: '2019-10-21 12:56:18.169609+02:00'\n",
" model-offset: 0\n",
" model-training-duration-sec: 0.005204916000366211\n",
Expand Down
2 changes: 1 addition & 1 deletion examples/Gordo-Workflow-Semi-Low-Level.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,7 @@
" \"\"\" \n",
" sklearn.pipeline.Pipeline:\n",
" steps:\n",
" - sklearn.preprocessing.data.MinMaxScaler\n",
" - sklearn.preprocessing.MinMaxScaler\n",
" - gordo.machine.model.models.KerasAutoEncoder:\n",
" kind: feedforward_hourglass\n",
" \"\"\"\n",
Expand Down
8 changes: 4 additions & 4 deletions examples/Pipelines-with-Gordo.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -69,23 +69,23 @@
"sklearn.pipeline.Pipeline:\n",
" memory: null\n",
" steps:\n",
" - sklearn.decomposition.pca.PCA: {copy: true, iterated_power: auto, n_components: 10,\n",
" - sklearn.decomposition.PCA: {copy: true, iterated_power: auto, n_components: 10,\n",
" random_state: null, svd_solver: auto, tol: 0.0, whiten: false}\n",
" - sklearn.pipeline.FeatureUnion:\n",
" n_jobs: null\n",
" transformer_list:\n",
" - sklearn.pipeline.Pipeline:\n",
" memory: null\n",
" steps:\n",
" - sklearn.preprocessing.data.MinMaxScaler:\n",
" - sklearn.preprocessing.MinMaxScaler:\n",
" copy: true\n",
" feature_range: !!python/tuple [0, 1]\n",
" - sklearn.decomposition.pca.PCA: {copy: true, iterated_power: auto, n_components: 2,\n",
" - sklearn.decomposition.PCA: {copy: true, iterated_power: auto, n_components: 2,\n",
" random_state: null, svd_solver: auto, tol: 0.0, whiten: false}\n",
" - sklearn.pipeline.Pipeline:\n",
" memory: null\n",
" steps:\n",
" - sklearn.preprocessing.data.QuantileTransformer: {copy: true, ignore_implicit_zeros: false,\n",
" - sklearn.preprocessing.QuantileTransformer: {copy: true, ignore_implicit_zeros: false,\n",
" n_quantiles: 1000, output_distribution: uniform, random_state: null,\n",
" subsample: 100000}\n",
" transformer_weights: null\n",
Expand Down
2 changes: 1 addition & 1 deletion examples/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,6 @@ spec:
base_estimator:
sklearn.pipeline.Pipeline:
steps:
- sklearn.preprocessing.data.MinMaxScaler
- sklearn.preprocessing.MinMaxScaler
- gordo.machine.model.models.KerasAutoEncoder:
kind: feedforward_hourglass
4 changes: 2 additions & 2 deletions gordo/builder/build_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ def __init__(self, machine: Machine):
>>> from gordo.machine import Machine
>>> machine = Machine(
... name="special-model-name",
... model={"sklearn.decomposition.pca.PCA": {"svd_solver": "auto"}},
... model={"sklearn.decomposition.PCA": {"svd_solver": "auto"}},
... dataset={
... "type": "RandomDataset",
... "train_start_date": "2017-12-25 06:00:00Z",
Expand Down Expand Up @@ -535,7 +535,7 @@ def calculate_cache_key(machine: Machine) -> str:
>>> from gordo.machine.dataset.sensor_tag import SensorTag
>>> machine = Machine(
... name="special-model-name",
... model={"sklearn.decomposition.pca.PCA": {"svd_solver": "auto"}},
... model={"sklearn.decomposition.PCA": {"svd_solver": "auto"}},
... dataset={
... "type": "RandomDataset",
... "train_start_date": "2017-12-25 06:00:00Z",
Expand Down
4 changes: 2 additions & 2 deletions gordo/builder/local_build.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,9 +51,9 @@ def local_build(
... base_estimator:
... sklearn.pipeline.Pipeline:
... steps:
... - sklearn.decomposition.pca.PCA
... - sklearn.decomposition.PCA
... - sklearn.multioutput.MultiOutputRegressor:
... estimator: sklearn.linear_model.base.LinearRegression
... estimator: sklearn.linear_model.LinearRegression
... name: crazy-sweet-name
... '''
>>> models_n_metadata = local_build(config)
Expand Down
16 changes: 8 additions & 8 deletions gordo/serializer/from_definition.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,19 +26,19 @@ def from_definition(
>>> raw_config = '''
... sklearn.pipeline.Pipeline:
... steps:
... - sklearn.decomposition.pca.PCA:
... - sklearn.decomposition.PCA:
... n_components: 3
... - sklearn.pipeline.FeatureUnion:
... - sklearn.decomposition.pca.PCA:
... - sklearn.decomposition.PCA:
... n_components: 3
... - sklearn.pipeline.Pipeline:
... - sklearn.preprocessing.data.MinMaxScaler
... - sklearn.decomposition.truncated_svd.TruncatedSVD:
... - sklearn.preprocessing.MinMaxScaler
... - sklearn.decomposition.TruncatedSVD:
... n_components: 2
... - sklearn.ensemble.forest.RandomForestClassifier:
... - sklearn.ensemble.RandomForestClassifier:
... max_depth: 3
... '''
>>> config = yaml.load(raw_config)
>>> config = yaml.safe_load(raw_config)
>>> scikit_learn_pipeline = serializer.from_definition(config)
Expand Down Expand Up @@ -204,7 +204,7 @@ def _load_param_classes(params: dict):
# Load an actual model, without any kwargs
>>> from sklearn.ensemble import RandomForestRegressor
>>> params = {"base_estimator": "sklearn.ensemble.forest.RandomForestRegressor"}
>>> params = {"base_estimator": "sklearn.ensemble.RandomForestRegressor"}
>>> print(_load_param_classes(params))
{'base_estimator': RandomForestRegressor(bootstrap=True, ccp_alpha=0.0, criterion='mse',
max_depth=None, max_features='auto', max_leaf_nodes=None,
Expand All @@ -215,7 +215,7 @@ def _load_param_classes(params: dict):
random_state=None, verbose=0, warm_start=False)}
# Load an actual model, with kwargs
>>> params = {"base_estimator": {"sklearn.ensemble.forest.RandomForestRegressor": {"n_estimators": 20}}}
>>> params = {"base_estimator": {"sklearn.ensemble.RandomForestRegressor": {"n_estimators": 20}}}
>>> print(_load_param_classes(params))
{'base_estimator': RandomForestRegressor(bootstrap=True, ccp_alpha=0.0, criterion='mse',
max_depth=None, max_features='auto', max_leaf_nodes=None,
Expand Down

0 comments on commit 1fd4e0b

Please sign in to comment.