From 7e03f5000ead8291ec63bcc435514ee9f42add32 Mon Sep 17 00:00:00 2001
From: Paige Rubendall
Date: Fri, 26 Jan 2024 15:29:41 -0500
Subject: [PATCH] adding more generic way of comparing with working ingress

Signed-off-by: Paige Rubendall
---
 README.md        |  45 ++++++++++++----
 orion.py         | 131 +++++++++++++++++++++++++++--------------------
 requirements.txt |   2 +-
 3 files changed, 110 insertions(+), 68 deletions(-)

diff --git a/README.md b/README.md
index b85a259..24d81fa 100644
--- a/README.md
+++ b/README.md
@@ -19,20 +19,43 @@ tests :
 #    ipsec: false
 
     metrics :
-    - metric : podReadyLatency
-      metricType : latency
+    - name: podReadyLatency
+      metricName: podLatencyQuantilesMeasurement
+      quantileName: Ready
+      metric_of_interest: P99
+      not:
+        - jobConfig.name: "garbage-collection"
 
-    - metric : apiserverCPU
-      metricType : cpu
-      namespace: openshift-kube-apiserver
+    - name: apiserverCPU
+      metricName: containerCPU
+      labels.namespace: openshift-kube-apiserver
+      metric_of_interest: value
+      agg:
+        value: cpu
+        agg_type: avg
 
-    - metric: ovnCPU
-      metricType: cpu
-      namespace: openshift-ovn-kubernetes
+    - name: ovnCPU
+      metricName: containerCPU
+      labels.namespace: openshift-ovn-kubernetes
+      metric_of_interest: value
+      agg:
+        value: cpu
+        agg_type: avg
+
+    - name: etcdCPU
+      metricName: containerCPU
+      labels.namespace: openshift-etcd
+      metric_of_interest: value
+      agg:
+        value: cpu
+        agg_type: avg
 
-    - metric: etcdCPU
-      metricType: cpu
-      namespace: openshift-ovn-kubernetes
+    - name: etcdDisk
+      metricName: 99thEtcdDiskBackendCommitDurationSeconds
+      metric_of_interest: value
+      agg:
+        value: duration
+        agg_type: avg
 ```

diff --git a/orion.py b/orion.py
index f40b19a..1cac3d9 100644
--- a/orion.py
+++ b/orion.py
@@ -19,7 +19,6 @@ def cli():
     """
     cli function to group commands
     """
-# pylint: disable=too-many-locals
 
 @click.command()
 @click.option("--config", default="config.yaml", help="Path to the configuration file")
@@ -52,7 +51,7 @@ def orion(config, debug, output):
         logger.error("An error occurred: %s", e)
         sys.exit(1)
     for test in data["tests"]:
-        metadata = get_metadata(test)
+        metadata = get_metadata(test, logger)
         logger.info("The test %s has started", test["name"])
         match = Matcher(index="perf_scale_ci", level=level)
         uuids = match.get_uuid_by_metadata(metadata)
@@ -60,46 +59,19 @@ def orion(config, debug, output):
         if len(uuids) == 0:
             print("No UUID present for given metadata")
             sys.exit()
 
-        runs = match.match_kube_burner(uuids)
-        ids = match.filter_runs(runs, runs)
+        if metadata["benchmark"] == "k8s-netperf":
+            index = "k8s-netperf"
+            ids = uuids
+        elif metadata["benchmark"] == "ingress-perf":
+            index = "ingress-performance"
+            ids = uuids
+        else:
+            index = "ripsaw-kube-burner"
+            runs = match.match_kube_burner(uuids)
+            ids = match.filter_runs(runs, runs)
+
         metrics = test["metrics"]
-        dataframe_list = []
-
-        for metric in metrics:
-            logger.info("Collecting %s", metric["metric"])
-            if metric["metricType"] == "latency":
-                if metric["metric"] == "podReadyLatency":
-                    try:
-                        podl = match.burner_results("", ids, "ripsaw-kube-burner*")
-                        podl_df = match.convert_to_df(
-                            podl, columns=["uuid", "timestamp", "P99"]
-                        )
-                        dataframe_list.append(podl_df)
-                        logger.debug(podl_df)
-                    except Exception as e:  # pylint: disable=broad-exception-caught
-                        logger.error(
-                            "The namespace %s does not exist, exception %s",
-                            metric["namespace"],
-                            e,
-                        )
-
-            elif metric["metricType"] == "cpu":
-                try:
-                    cpu = match.burner_cpu_results(
-                        ids, metric["namespace"], "ripsaw-kube-burner*"
-                    )
-                    cpu_df = match.convert_to_df(cpu, columns=["uuid", "cpu_avg"])
-                    cpu_df = cpu_df.rename(
-                        columns={"cpu_avg": metric["metric"] + "_cpu_avg"}
-                    )
-                    dataframe_list.append(cpu_df)
-                    logger.debug(cpu_df)
-                except Exception as e:  # pylint: disable=broad-exception-caught
-                    logger.error(
-                        "The namespace %s does not exist, exception %s",
-                        metric["namespace"],
-                        e,
-                    )
+        dataframe_list = get_metric_data(ids, index, metrics, match, logger)
 
         merged_df = reduce(
             lambda left, right: pd.merge(left, right, on="uuid", how="inner"),
@@ -108,7 +80,62 @@ def orion(config, debug, output):
             dataframe_list,
         )
         match.save_results(merged_df, csv_file_path=output)
 
 
-def get_metadata(test):
+def get_metric_data(ids, index, metrics, match, logger):
+    """Gets detailed metrics based on the metrics YAML list
+
+    Args:
+        ids (list): list of all uuids
+        index (str): ES index where the data is stored
+        metrics (list): metrics to gather data on
+        match (Matcher): current matcher instance
+        logger (Logger): logger used for all output
+
+    Returns:
+        dataframe_list: list of dataframes, one per metric
+    """
+    dataframe_list = []
+    for metric in metrics:
+        logger.info("Collecting %s", metric["name"])
+        metric_of_interest = metric["metric_of_interest"]
+        metric_name = metric["name"]
+        if "agg" in metric.keys():
+            try:
+                cpu = match.get_agg_metric_query(ids, index, metric)
+                agg_value = metric["agg"]["value"]
+                agg_type = metric["agg"]["agg_type"]
+                agg_name = agg_value + "_" + agg_type
+                cpu_df = match.convert_to_df(cpu, columns=["uuid", agg_name])
+                cpu_df = cpu_df.rename(
+                    columns={agg_name: metric_name + "_" + agg_name}
+                )
+                dataframe_list.append(cpu_df)
+                logger.debug(cpu_df)
+
+            except Exception as e:  # pylint: disable=broad-exception-caught
+                logger.error(
+                    "Couldn't get agg metrics %s, exception %s",
+                    metric_name,
+                    e,
+                )
+        else:
+            try:
+                podl = match.getResults("", ids, index, metric)
+                podl_df = match.convert_to_df(
+                    podl, columns=["uuid", "timestamp", metric_of_interest]
+                )
+                dataframe_list.append(podl_df)
+                logger.debug(podl_df)
+            except Exception as e:  # pylint: disable=broad-exception-caught
+                logger.error(
+                    "Couldn't get metrics %s, exception %s",
+                    metric_name,
+                    e,
+                )
+    return dataframe_list
+
+def get_metadata(test, logger):
     """Gets metadata of the run from each test
 
     Args:
@@ -117,21 +144,13 @@ def get_metadata(test):
     Returns:
         dict: dictionary of the metadata
     """
-    metadata_columns = [
-        "platform",
-        "masterNodesType",
-        "masterNodesCount",
-        "workerNodesType",
-        "workerNodesCount",
-        "benchmark",
-        "ocpVersion",
-        "networkType",
-        "encrypted",
-        "fips",
-        "ipsec",
-    ]
-    metadata = {key: test[key] for key in metadata_columns if key in test}
+    metadata = {}
+    for k, v in test.items():
+        if k in ["metrics", "name"]:
+            continue
+        metadata[k] = v
     metadata["ocpVersion"] = str(metadata["ocpVersion"])
+    logger.debug("metadata: %s", metadata)
     return metadata
 
 
diff --git a/requirements.txt b/requirements.txt
index b98bd22..e3f3ac3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@ click==8.1.7
 elastic-transport==8.11.0
 elasticsearch==8.11.1
 elasticsearch7==7.13.0
-fmatch==0.0.2
+fmatch==0.0.3
 numpy==1.26.3
 pandas==2.1.4
 python-dateutil==2.8.2
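
A note on the config contract this patch introduces: `get_metadata` no longer filters against a fixed list of metadata columns. Every top-level key of a test except `name` and `metrics` is passed straight into the Elasticsearch metadata match, so config keys must now match the indexed metadata fields exactly. A minimal sketch of the new filtering (the test values here are illustrative, not taken from the repo):

```python
# Hypothetical test entry, shaped like the YAML in the README example.
test = {
    "name": "aws-small-scale",   # skipped: only labels the test
    "metrics": [],               # skipped: consumed by get_metric_data
    "platform": "AWS",
    "benchmark": "k8s-netperf",
    "ocpVersion": 4.15,
}

# Same logic as the patched get_metadata: everything else becomes metadata.
metadata = {k: v for k, v in test.items() if k not in ["metrics", "name"]}
metadata["ocpVersion"] = str(metadata["ocpVersion"])  # version is compared as a string

print(metadata)
# {'platform': 'AWS', 'benchmark': 'k8s-netperf', 'ocpVersion': '4.15'}
```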
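The dispatch in `get_metric_data` is driven entirely by the metric dict: entries carrying an `agg` block go through `Matcher.get_agg_metric_query` and have their value column renamed to `<name>_<value>_<agg_type>`, while everything else goes through `Matcher.getResults` keyed on `metric_of_interest`. A runnable sketch of that dispatch with a stubbed matcher, assuming the patched orion.py is importable; the stub class and its canned rows are illustrative, the real fmatch Matcher queries Elasticsearch:

```python
import logging
import pandas as pd
from orion import get_metric_data  # orion.py as patched above

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("orion-demo")

# One aggregated metric and one raw metric, shaped like the README example.
metrics = [
    {"name": "apiserverCPU", "metricName": "containerCPU",
     "labels.namespace": "openshift-kube-apiserver",
     "metric_of_interest": "value",
     "agg": {"value": "cpu", "agg_type": "avg"}},
    {"name": "podReadyLatency", "metricName": "podLatencyQuantilesMeasurement",
     "quantileName": "Ready", "metric_of_interest": "P99"},
]

class StubMatcher:
    """Stand-in for fmatch.Matcher that returns canned rows instead of querying ES."""
    def get_agg_metric_query(self, ids, index, metric):
        return [{"uuid": u, "cpu_avg": 1.5} for u in ids]
    def getResults(self, _unused, ids, index, metric):
        return [{"uuid": u, "timestamp": "2024-01-26T00:00:00Z", "P99": 9000} for u in ids]
    def convert_to_df(self, data, columns):
        return pd.DataFrame(data)[columns]

frames = get_metric_data(["uuid-1", "uuid-2"], "ripsaw-kube-burner",
                         metrics, StubMatcher(), logger)
for frame in frames:
    print(frame.columns.tolist())
# ['uuid', 'apiserverCPU_cpu_avg']
# ['uuid', 'timestamp', 'P99']
```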
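One behavior worth keeping in mind when adding metrics: the final table is still assembled with an inner join on `uuid` (the `reduce`/`pd.merge` context above), so a run that is missing from any single metric's frame drops out of the output CSV entirely. A toy illustration with made-up frames:

```python
from functools import reduce
import pandas as pd

# Per-metric frames keyed by uuid; "uuid-c" lacks an apiserverCPU sample.
latency = pd.DataFrame({"uuid": ["uuid-a", "uuid-b", "uuid-c"],
                        "P99": [9.0, 9.5, 12.0]})
apiserver = pd.DataFrame({"uuid": ["uuid-a", "uuid-b"],
                          "apiserverCPU_cpu_avg": [1.2, 1.4]})

merged = reduce(
    lambda left, right: pd.merge(left, right, on="uuid", how="inner"),
    [latency, apiserver],
)
print(merged)  # uuid-c is gone: the inner join keeps only uuids present in every frame
```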