diff --git a/orion.py b/orion.py index 765b6ad..de4b082 100644 --- a/orion.py +++ b/orion.py @@ -19,7 +19,6 @@ def cli(): cli function to group commands """ - # pylint: disable=too-many-locals @click.command() @click.option("--config", default="config.yaml", help="Path to the configuration file") @@ -59,9 +58,7 @@ def orion(config, debug, output): if len(uuids) == 0: print("No UUID present for given metadata") sys.exit() - - metrics = test["metrics"] - dataframe_list = [] + if metadata["benchmark"] == "k8s-netperf" : index = "k8s-netperf" ids = uuids @@ -73,46 +70,8 @@ def orion(config, debug, output): runs = match.match_kube_burner(uuids) ids = match.filter_runs(runs, runs) - for metric in metrics: - logger.info("Collecting %s", metric["metric"]) - metric_of_interest = metric['metric_of_interest'] - metric_name = metric['metric'] - metric.pop("metric") - if "agg" in metric.keys(): - try: - cpu = match.get_agg_metric_query( - ids, index, metric - ) - agg_value = metric['agg']['value'] - agg_type = metric['agg']['agg_type'] - agg_name = agg_value + "_" + agg_type - cpu_df = match.convert_to_df(cpu, columns=["uuid", agg_name]) - cpu_df = cpu_df.rename( - columns={agg_name: metric_name+ "_" + agg_name} - ) - dataframe_list.append(cpu_df) - logger.debug(cpu_df) - - except Exception as e: # pylint: disable=broad-exception-caught - logger.error( - "Couldn't get agg metrics %s, exception %s", - metric_name, - e, - ) - else: - try: - podl = match.getResults("", ids, index, metric) - podl_df = match.convert_to_df( - podl, columns=["uuid", "timestamp", metric_of_interest] - ) - dataframe_list.append(podl_df) - logger.debug(podl_df) - except Exception as e: # pylint: disable=broad-exception-caught - logger.error( - "Couldn't get metrics %s, exception %s", - metric_name, - e, - ) + metrics = test["metrics"] + dataframe_list = get_metric_data(ids, index, metrics, match, logger) merged_df = reduce( lambda left, right: pd.merge(left, right, on="uuid", how="inner"), @@ -121,6 
+80,62 @@ def orion(config, debug, output): match.save_results(merged_df, csv_file_path=output) +def get_metric_data(ids, index, metrics, match, logger): + """Gets detailed metrics based on metric YAML list + + Args: + ids (list): list of all uuids + index (str): index in Elasticsearch where to find data + metrics (list): metrics to gather data on + match (Matcher): current matcher instance + logger (logger): log data to one output + + Returns: + dataframe_list: list of dataframes of all the metrics + """ + dataframe_list = [] + for metric in metrics: + logger.info("Collecting %s", metric["metric"]) + metric_of_interest = metric['metric_of_interest'] + metric_name = metric['metric'] + metric.pop("metric") + if "agg" in metric.keys(): + try: + cpu = match.get_agg_metric_query( + ids, index, metric + ) + agg_value = metric['agg']['value'] + agg_type = metric['agg']['agg_type'] + agg_name = agg_value + "_" + agg_type + cpu_df = match.convert_to_df(cpu, columns=["uuid", agg_name]) + cpu_df = cpu_df.rename( + columns={agg_name: metric_name+ "_" + agg_name} + ) + dataframe_list.append(cpu_df) + logger.debug(cpu_df) + + except Exception as e: # pylint: disable=broad-exception-caught + logger.error( + "Couldn't get agg metrics %s, exception %s", + metric_name, + e, + ) + else: + try: + podl = match.getResults("", ids, index, metric) + podl_df = match.convert_to_df( + podl, columns=["uuid", "timestamp", metric_of_interest] + ) + dataframe_list.append(podl_df) + logger.debug(podl_df) + except Exception as e: # pylint: disable=broad-exception-caught + logger.error( + "Couldn't get metrics %s, exception %s", + metric_name, + e, + ) + return dataframe_list + + def get_metadata(test,logger): """Gets metadata of the run from each test