diff --git a/gordo_components/cli/client.py b/gordo_components/cli/client.py
index bdcbac16c..4ae3dc103 100644
--- a/gordo_components/cli/client.py
+++ b/gordo_components/cli/client.py
@@ -2,6 +2,7 @@
 import os
 import typing
+import sys
 import json
 from datetime import datetime
 from pprint import pprint
 
@@ -108,20 +109,20 @@ def predict(
 
     # Loop over all error messages for each result and log them
     click.secho(f"\n{'-' * 20} Summary of failed predictions (if any) {'-' * 20}")
+    exit_code = 0
     for (_name, _df, error_messages) in predictions:
         for err_msg in error_messages:
+            # Any error message indicates we encountered at least one error
+            exit_code = 1
             click.secho(err_msg, fg="red")
 
     # Shall we write the predictions out?
     if output_dir is not None:
-        for (
-            name,
-            prediction_df,
-            _err_msgs,
-        ) in predictions:  # [(name: str, predictions: pd.DataFrame), ...]
+        for (name, prediction_df, _err_msgs) in predictions:
             prediction_df.to_csv(
                 os.path.join(output_dir, f"{name}.csv.gz"), compression="gzip"
             )
+    sys.exit(exit_code)
 
 
 @click.command("metadata")
diff --git a/tests/gordo_components/client/test_client.py b/tests/gordo_components/client/test_client.py
index b3620dc4b..7dc455059 100644
--- a/tests/gordo_components/client/test_client.py
+++ b/tests/gordo_components/client/test_client.py
@@ -3,6 +3,7 @@
 import os
 import tempfile
 import json
+import logging
 
 from dateutil.parser import isoparse  # type: ignore
 import aiohttp
@@ -331,6 +332,49 @@ def test_client_cli_predict(
     )
 
 
+@pytest.mark.parametrize(
+    "should_fail,start_date,end_date",
+    [
+        (True, "1888-01-01T00:00:00Z", "1888-02-01T01:00:00Z"),  # Fail on bad dates
+        (False, "2016-01-01T00:00:00Z", "2016-01-01T01:00:00Z"),  # pass on good dates
+    ],
+)
+def test_client_cli_predict_non_zero_exit(
+    should_fail, start_date, end_date, caplog, influxdb, watchman_service
+):
+    """
+    Test ability for client to get predictions via CLI
+    """
+    runner = CliRunner()
+
+    # Parametrized dates decide the outcome: non-existent dates should fail
+    args = [
+        "client",
+        "--metadata",
+        "key,value",
+        "--project",
+        tu.GORDO_PROJECT,
+        "predict",
+        start_date,
+        end_date,
+    ]
+
+    data_provider = providers.InfluxDataProvider(
+        measurement=tu.INFLUXDB_MEASUREMENT, value_name="Value", uri=tu.INFLUXDB_URI
+    )
+
+    args.extend(["--data-provider", json.dumps(data_provider.to_dict())])
+
+    # Invoke the CLI, silencing log output below CRITICAL during the run
+    with caplog.at_level(logging.CRITICAL):
+        out = runner.invoke(cli.gordo, args=args)
+
+    if should_fail:
+        assert out.exit_code != 0, f"{out.output}"
+    else:
+        assert out.exit_code == 0, f"{out.output}"
+
+
 @pytest.mark.parametrize(
     "config",
     (