diff --git a/orion.py b/orion.py index eb73488..2e16843 100644 --- a/orion.py +++ b/orion.py @@ -113,7 +113,7 @@ def cmd_analysis(**kwargs): logger_instance = SingletonLogger(debug=level, name="Orion") logger_instance.info("🏹 Starting Orion in command-line mode") kwargs["configMap"] = load_config(kwargs["config"]) - output = run(**kwargs) + output, regression_flag = run(**kwargs) if output is None: logger_instance.error("Terminating test") sys.exit(0) @@ -125,6 +125,8 @@ def cmd_analysis(**kwargs): output_file_name = f"{kwargs['save_output_path'].split('.')[0]}_{test_name}.{kwargs['save_output_path'].split('.')[1]}" with open(output_file_name, 'w', encoding="utf-8") as file: file.write(str(result_table)) + if regression_flag: + sys.exit(1) diff --git a/pkg/algorithms/algorithm.py b/pkg/algorithms/algorithm.py index b84cbf1..16d3744 100644 --- a/pkg/algorithms/algorithm.py +++ b/pkg/algorithms/algorithm.py @@ -30,8 +30,9 @@ def __init__( # pylint: disable = too-many-arguments self.test = test self.options = options self.metrics_config = metrics_config + self.regression_flag = False - def output_json(self) -> Tuple[str, str]: + def output_json(self) -> Tuple[str, str, bool]: """Method to output json output Returns: @@ -64,9 +65,9 @@ def output_json(self) -> Tuple[str, str]: ] = percentage_change dataframe_json[index]["is_changepoint"] = True - return self.test["name"], json.dumps(dataframe_json, indent=2) + return self.test["name"], json.dumps(dataframe_json, indent=2), self.regression_flag - def output_text(self) -> Tuple[str,str]: + def output_text(self) -> Tuple[str,str, bool]: """Outputs the data in text/tabular format""" series, change_points_by_metric = self._analyze() change_points_by_time = self.group_change_points_by_time( @@ -76,9 +77,9 @@ def output_text(self) -> Tuple[str,str]: output_table = report.produce_report( test_name=self.test["name"], report_type=ReportType.LOG ) - return self.test["name"], output_table + return self.test["name"], output_table, 
self.regression_flag - def output_junit(self) -> Tuple[str,str]: + def output_junit(self) -> Tuple[str,str, bool]: """Output junit format Returns: @@ -92,7 +93,7 @@ def output_junit(self) -> Tuple[str,str]: metrics_config=self.metrics_config, options=self.options, ) - return test_name, data_junit + return test_name, data_junit, self.regression_flag @abstractmethod def _analyze(self): diff --git a/pkg/algorithms/edivisive/edivisive.py b/pkg/algorithms/edivisive/edivisive.py index a835459..5d2143a 100644 --- a/pkg/algorithms/edivisive/edivisive.py +++ b/pkg/algorithms/edivisive/edivisive.py @@ -25,5 +25,6 @@ def _analyze(self): if ((self.metrics_config[metric]["direction"] == 1 and changepoint_list[i].stats.mean_1 > changepoint_list[i].stats.mean_2) or (self.metrics_config[metric]["direction"] == -1 and changepoint_list[i].stats.mean_1 < changepoint_list[i].stats.mean_2) ): del changepoint_list[i] - + if [val for li in change_points_by_metric.values() for val in li]: + self.regression_flag=True return series, change_points_by_metric diff --git a/pkg/algorithms/isolationforest/isolationForest.py b/pkg/algorithms/isolationforest/isolationForest.py index 8158149..41ff88d 100644 --- a/pkg/algorithms/isolationforest/isolationForest.py +++ b/pkg/algorithms/isolationforest/isolationForest.py @@ -70,5 +70,6 @@ def _analyze(self): pvalue=1 )) change_points_by_metric[feature].append(change_point) - + if [val for li in change_points_by_metric.values() for val in li]: + self.regression_flag=True return series, change_points_by_metric diff --git a/pkg/daemon.py b/pkg/daemon.py index 63f8bd3..e39e5dd 100644 --- a/pkg/daemon.py +++ b/pkg/daemon.py @@ -53,7 +53,7 @@ async def daemon_changepoint( # pylint: disable = R0913 filter_changepoints = ( True if filter_changepoints == "true" else False # pylint: disable = R1719 ) - result = runTest.run(**option_arguments) + result, _ = runTest.run(**option_arguments) if result is None: 
return {"Error":"No UUID with given metadata"} result = {k:json.loads(v) for k,v in result.items()} @@ -129,7 +129,7 @@ async def daemon_anomaly( # pylint: disable = R0913, R0914 filter_points = ( True if filter_points == "true" else False # pylint: disable = R1719 ) - result = runTest.run(**option_arguments) + result, _ = runTest.run(**option_arguments) if result is None: return {"Error":"No UUID with given metadata"} result = {k:json.loads(v) for k,v in result.items()} diff --git a/pkg/runTest.py b/pkg/runTest.py index d4cb7e4..769a018 100644 --- a/pkg/runTest.py +++ b/pkg/runTest.py @@ -10,7 +10,7 @@ -def run(**kwargs: dict[str, Any]) -> dict[str, Any]: +def run(**kwargs: dict[str, Any]) -> tuple[dict[str, Any], bool]: #pylint: disable = R0914 """run method to start the tests Args: @@ -26,6 +26,7 @@ def run(**kwargs: dict[str, Any]) -> dict[str, Any]: config_map = kwargs["configMap"] datasource = get_datasource(config_map) result_output = {} + regression_flag = False for test in config_map["tests"]: # Create fingerprint Matcher matcher = Matcher( @@ -42,7 +43,6 @@ def run(**kwargs: dict[str, Any]) -> dict[str, Any]: kwargs, start_timestamp, ) - if fingerprint_matched_df is None: - return None + return None, False @@ -62,9 +62,10 @@ def run(**kwargs: dict[str, Any]) -> dict[str, Any]: kwargs, metrics_config, ) - testname, result_data = algorithm.output(kwargs["output_format"]) + testname, result_data, test_flag = algorithm.output(kwargs["output_format"]) result_output[testname] = result_data - return result_output + regression_flag = regression_flag or test_flag + return result_output, regression_flag def get_start_timestamp(kwargs: Dict[str, Any]) -> str: diff --git a/pkg/utils.py b/pkg/utils.py index 350be59..ab5986e 100644 --- a/pkg/utils.py +++ b/pkg/utils.py @@ -262,6 +262,7 @@ def process_test( else buildUrls[uuid] ) # pylint: disable = cell-var-from-loop ) + merged_df=merged_df.reset_index(drop=True) #save the dataframe output_file_path 
= f"{options['save_data_path'].split('.')[0]}-{test['name']}.csv" match.save_results(merged_df, csv_file_path=output_file_path)