diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml index d8c23ef..25699ee 100644 --- a/.github/workflows/pylint.yml +++ b/.github/workflows/pylint.yml @@ -22,4 +22,4 @@ jobs: pip install . - name: Analysing the code with pylint run: | - pylint -d C0103 $(git ls-files '*.py') \ No newline at end of file + pylint -d C0103 -d R0912 $(git ls-files '*/*.py' '*.py') \ No newline at end of file diff --git a/README.md b/README.md index 8c124d6..4b47ff5 100644 --- a/README.md +++ b/README.md @@ -97,3 +97,19 @@ Additionally, users can specify a custom path for the output CSV file using the Orion's seamless integration with metadata and hunter ensures a robust regression detection tool for perf-scale CPT runs. +```--uuid``` : If you have a specific uuid in mind (maybe a current run), you can bypass the metadata configuration portion of the config file and use this parameter. You will still need to specify a config file that contains a metrics section for the metrics you want to collect on the current uuid and uuids that match the metadata of the uuid configuration. + +``` +tests : + - name : current-uuid-etcd-duration + metrics : + - name: etcdDisk + metricName : 99thEtcdDiskBackendCommitDurationSeconds + metric_of_interest: value + agg: + value: duration + agg_type: avg +``` + +Orion provides flexibility if you know the comparison uuid you want to compare among, use the ```--baseline``` flag. This should only be used in conjunction with the uuid parameter. 
Similar to the uuid section mentioned above, you'll have to set a metrics section to specify the data points you want to collect on + diff --git a/examples/small-scale-cluster-density.yaml b/examples/small-scale-cluster-density.yaml index 703f1b5..538362e 100644 --- a/examples/small-scale-cluster-density.yaml +++ b/examples/small-scale-cluster-density.yaml @@ -6,7 +6,7 @@ tests : masterNodesCount: 3 workerNodesType: m6a.xlarge workerNodesCount: 24 - benchmark.keyword: cluster-density-v2 + benchmark: cluster-density-v2 ocpVersion: 4.15 networkType: OVNKubernetes # encrypted: true diff --git a/examples/small-scale-node-density-cni.yaml b/examples/small-scale-node-density-cni.yaml index 1821f1a..9162b98 100644 --- a/examples/small-scale-node-density-cni.yaml +++ b/examples/small-scale-node-density-cni.yaml @@ -7,7 +7,7 @@ tests : workerNodesType: m6a.xlarge workerNodesCount: 6 infraNodesCount: 3 - benchmark.keyword: node-density-cni + benchmark: node-density-cni ocpVersion: 4.15 networkType: OVNKubernetes infraNodesType: r5.2xlarge diff --git a/orion.py b/orion.py index de52114..3d63542 100644 --- a/orion.py +++ b/orion.py @@ -11,8 +11,7 @@ import pandas as pd from fmatch.matcher import Matcher -from utils.orion_funcs import run_hunter_analyze, get_metadata, \ - set_logging, load_config, get_metric_data +from utils import orion_funcs @click.group() @@ -23,22 +22,28 @@ def cli(): # pylint: disable=too-many-locals @click.command() +@click.option("--uuid", default="", help="UUID to use as base for comparisons") +@click.option("--baseline", default="", help="UUID to use as base for comparisons") @click.option("--config", default="config.yaml", help="Path to the configuration file") @click.option("--output", default="output.csv", help="Path to save the output csv file") @click.option("--debug", is_flag=True, help="log level ") @click.option("--hunter-analyze",is_flag=True, help="run hunter analyze") -def orion(config, debug, output,hunter_analyze): +def orion(**kwargs): 
"""Orion is the cli tool to detect regressions over the runs Args: + uuid (str): gather metrics based on uuid + baseline (str): baseline uuid to compare against uuid (uuid must be set when using baseline) config (str): path to the config file debug (bool): lets you log debug mode output (str): path to the output csv file + hunter_analyze (bool): turns on hunter analysis of gathered uuid(s) data """ - level = logging.DEBUG if debug else logging.INFO + + level = logging.DEBUG if kwargs["debug"] else logging.INFO logger = logging.getLogger("Orion") - logger = set_logging(level, logger) - data = load_config(config,logger) + logger = orion_funcs.set_logging(level, logger) + data = orion_funcs.load_config(kwargs["config"],logger) ES_URL=None if "ES_SERVER" in data.keys(): @@ -51,38 +56,50 @@ def orion(config, debug, output,hunter_analyze): sys.exit(1) for test in data["tests"]: - metadata = get_metadata(test, logger) - logger.info("The test %s has started", test["name"]) + uuid = kwargs["uuid"] + baseline = kwargs["baseline"] match = Matcher(index="perf_scale_ci", level=level, ES_URL=ES_URL) - uuids = match.get_uuid_by_metadata(metadata) - if len(uuids) == 0: - print("No UUID present for given metadata") - sys.exit() + if kwargs["uuid"] == "": + metadata = orion_funcs.get_metadata(test, logger) + else: + metadata = orion_funcs.get_uuid_metadata(uuid,match,logger) - if metadata["benchmark.keyword"] == "k8s-netperf" : + logger.info("The test %s has started", test["name"]) + if baseline == "": + uuids = match.get_uuid_by_metadata(metadata) + if len(uuids) == 0: + logging.info("No UUID present for given metadata") + sys.exit() + else: + uuids = baseline.split(',') + uuids.append(uuid) + if metadata["benchmark"] == "k8s-netperf" : index = "k8s-netperf" ids = uuids - elif metadata["benchmark.keyword"] == "ingress-perf" : + elif metadata["benchmark"] == "ingress-perf" : index = "ingress-performance" ids = uuids else: index = "ripsaw-kube-burner" - runs = 
match.match_kube_burner(uuids) - ids = match.filter_runs(runs, runs) + if baseline == "": + runs = match.match_kube_burner(uuids) + ids = match.filter_runs(runs, runs) + else: + ids = uuids metrics = test["metrics"] - dataframe_list = get_metric_data(ids, index, metrics, match, logger) + dataframe_list = orion_funcs.get_metric_data(ids, index, metrics, match, logger) merged_df = reduce( lambda left, right: pd.merge(left, right, on="uuid", how="inner"), dataframe_list, ) - match.save_results(merged_df, csv_file_path=output.split(".")[0]+"-"+test['name']+".csv") - - if hunter_analyze: - run_hunter_analyze(merged_df,test) - + match.save_results( + merged_df, csv_file_path=kwargs["output"].split(".")[0]+"-"+test['name']+".csv" + ) + if kwargs["hunter_analyze"]: + orion_funcs.run_hunter_analyze(merged_df,test) if __name__ == "__main__": diff --git a/utils/orion_funcs.py b/utils/orion_funcs.py index 08ec813..e728873 100644 --- a/utils/orion_funcs.py +++ b/utils/orion_funcs.py @@ -116,6 +116,48 @@ def get_metadata(test,logger): return metadata +def get_uuid_metadata(uuid,match,logger): + """Gets metadata of the run for a given uuid + + Args: + uuid (str): str of uuid to find metadata of + match: the fmatch instance + + + Returns: + dict: dictionary of the metadata + """ + + test= match.get_metadata_by_uuid(uuid) + metadata = { + 'platform': '', + 'clusterType': '', + 'benchmark': '', + 'masterNodesCount': 0, + 'workerNodesCount': 0, + 'infraNodesCount': 0, + 'masterNodesType': '', + 'workerNodesType': '', + 'infraNodesType': '', + 'totalNodesCount': 0, + 'ocpVersion': '', + 'networkType': '', + 'ipsec': '', + 'fips': '', + 'encrypted': '', + 'publish': '', + 'computeArch': '', + 'controlPlaneArch': '' + } + for k,v in test.items(): + if k not in metadata: + continue + metadata[k] = v + metadata["ocpVersion"] = str(metadata["ocpVersion"]) + logger.debug('metadata' + str(metadata)) + return metadata + + def set_logging(level, logger): """sets log level and format