Skip to content

Commit

Permalink
Adding baseline and uuid options
Browse files Browse the repository at this point in the history
Signed-off-by: Paige Rubendall <[email protected]>
  • Loading branch information
paigerube14 committed Mar 11, 2024
1 parent 1ebf0b0 commit 366fc6d
Show file tree
Hide file tree
Showing 6 changed files with 100 additions and 25 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/pylint.yml
Original file line number Diff line number Diff line change
Expand Up @@ -22,4 +22,4 @@ jobs:
pip install .
- name: Analysing the code with pylint
run: |
pylint -d C0103 $(git ls-files '*.py')
pylint -d C0103 -d R0912 $(git ls-files '*/*.py' '*.py')
16 changes: 16 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -97,3 +97,19 @@ Additionally, users can specify a custom path for the output CSV file using the
Orion's seamless integration with metadata and hunter ensures a robust regression detection tool for perf-scale CPT runs.


```--uuid``` : If you have a specific uuid in mind (maybe a current run), you can bypass the metadata configuration portion of the config file and use this parameter. You will still need to specify a config file that contains a metrics section listing the metrics you want to collect for the given uuid and for the uuids whose metadata matches that uuid's configuration.

```
tests :
- name : current-uuid-etcd-duration
metrics :
- name: etcdDisk
metricName : 99thEtcdDiskBackendCommitDurationSeconds
metric_of_interest: value
agg:
value: duration
agg_type: avg
```

Orion provides flexibility: if you already know the uuid(s) you want to compare against, use the ```--baseline``` flag. This flag should only be used in conjunction with ```--uuid```. Similar to the uuid section mentioned above, you'll have to set a metrics section to specify the data points you want to collect.

2 changes: 1 addition & 1 deletion examples/small-scale-cluster-density.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ tests :
masterNodesCount: 3
workerNodesType: m6a.xlarge
workerNodesCount: 24
benchmark.keyword: cluster-density-v2
benchmark: cluster-density-v2
ocpVersion: 4.15
networkType: OVNKubernetes
# encrypted: true
Expand Down
2 changes: 1 addition & 1 deletion examples/small-scale-node-density-cni.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ tests :
workerNodesType: m6a.xlarge
workerNodesCount: 6
infraNodesCount: 3
benchmark.keyword: node-density-cni
benchmark: node-density-cni
ocpVersion: 4.15
networkType: OVNKubernetes
infraNodesType: r5.2xlarge
Expand Down
61 changes: 39 additions & 22 deletions orion.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,7 @@
import pandas as pd

from fmatch.matcher import Matcher
from utils.orion_funcs import run_hunter_analyze, get_metadata, \
set_logging, load_config, get_metric_data
from utils import orion_funcs


@click.group()
Expand All @@ -23,22 +22,28 @@ def cli():

# pylint: disable=too-many-locals
@click.command()
@click.option("--uuid", default="", help="UUID to use as base for comparisons")
@click.option("--baseline", default="", help="UUID to use as base for comparisons")
@click.option("--config", default="config.yaml", help="Path to the configuration file")
@click.option("--output", default="output.csv", help="Path to save the output csv file")
@click.option("--debug", is_flag=True, help="log level ")
@click.option("--hunter-analyze",is_flag=True, help="run hunter analyze")
def orion(config, debug, output,hunter_analyze):
def orion(**kwargs):
"""Orion is the cli tool to detect regressions over the runs
Args:
uuid (str): gather metrics based on uuid
baseline (str): baseline uuid to compare against uuid (uuid must be set when using baseline)
config (str): path to the config file
debug (bool): lets you log debug mode
output (str): path to the output csv file
hunter_analyze (bool): turns on hunter analysis of gathered uuid(s) data
"""
level = logging.DEBUG if debug else logging.INFO

level = logging.DEBUG if kwargs["debug"] else logging.INFO
logger = logging.getLogger("Orion")
logger = set_logging(level, logger)
data = load_config(config,logger)
logger = orion_funcs.set_logging(level, logger)
data = orion_funcs.load_config(kwargs["config"],logger)
ES_URL=None

if "ES_SERVER" in data.keys():
Expand All @@ -51,38 +56,50 @@ def orion(config, debug, output,hunter_analyze):
sys.exit(1)

for test in data["tests"]:
metadata = get_metadata(test, logger)
logger.info("The test %s has started", test["name"])
uuid = kwargs["uuid"]
baseline = kwargs["baseline"]
match = Matcher(index="perf_scale_ci", level=level, ES_URL=ES_URL)
uuids = match.get_uuid_by_metadata(metadata)
if len(uuids) == 0:
print("No UUID present for given metadata")
sys.exit()
if kwargs["uuid"] == "":
metadata = orion_funcs.get_metadata(test, logger)
else:
metadata = orion_funcs.get_uuid_metadata(uuid,match,logger)

if metadata["benchmark.keyword"] == "k8s-netperf" :
logger.info("The test %s has started", test["name"])
if baseline == "":
uuids = match.get_uuid_by_metadata(metadata)
if len(uuids) == 0:
logging.info("No UUID present for given metadata")
sys.exit()
else:
uuids = baseline.split(',')
uuids.append(uuid)
if metadata["benchmark"] == "k8s-netperf" :
index = "k8s-netperf"
ids = uuids
elif metadata["benchmark.keyword"] == "ingress-perf" :
elif metadata["benchmark"] == "ingress-perf" :
index = "ingress-performance"
ids = uuids
else:
index = "ripsaw-kube-burner"
runs = match.match_kube_burner(uuids)
ids = match.filter_runs(runs, runs)
if baseline == "":
runs = match.match_kube_burner(uuids)
ids = match.filter_runs(runs, runs)
else:
ids = uuids

metrics = test["metrics"]
dataframe_list = get_metric_data(ids, index, metrics, match, logger)
dataframe_list = orion_funcs.get_metric_data(ids, index, metrics, match, logger)

merged_df = reduce(
lambda left, right: pd.merge(left, right, on="uuid", how="inner"),
dataframe_list,
)
match.save_results(merged_df, csv_file_path=output.split(".")[0]+"-"+test['name']+".csv")

if hunter_analyze:
run_hunter_analyze(merged_df,test)

match.save_results(
merged_df, csv_file_path=kwargs["output"].split(".")[0]+"-"+test['name']+".csv"
)

if kwargs["hunter_analyze"]:
orion_funcs.run_hunter_analyze(merged_df,test)


if __name__ == "__main__":
Expand Down
42 changes: 42 additions & 0 deletions utils/orion_funcs.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,48 @@ def get_metadata(test,logger):
return metadata


def get_uuid_metadata(uuid,match,logger):
    """Get the metadata of a single run, keyed by the fields Orion matches on.

    Args:
        uuid (str): uuid of the run to find metadata of
        match: the fmatch Matcher instance used to look up the run
        logger: logger used for debug output
    Returns:
        dict: dictionary of the metadata; fields the datastore does not
            report keep a neutral default ('' or 0)
    """
    # Template of every metadata key Orion compares runs on.
    metadata = {
        'platform': '',
        'clusterType': '',
        'benchmark': '',
        'masterNodesCount': 0,
        'workerNodesCount': 0,
        'infraNodesCount': 0,
        'masterNodesType': '',
        'workerNodesType': '',
        'infraNodesType': '',
        'totalNodesCount': 0,
        'ocpVersion': '',
        'networkType': '',
        'ipsec': '',
        'fips': '',
        'encrypted': '',
        'publish': '',
        'computeArch': '',
        'controlPlaneArch': ''
    }
    run = match.get_metadata_by_uuid(uuid)
    # Copy over only the recognized keys; ignore any extra fields the
    # datastore returns so downstream matching uses a fixed schema.
    metadata.update({k: v for k, v in run.items() if k in metadata})
    # ocpVersion may come back as a number (e.g. 4.15); callers expect a string.
    metadata["ocpVersion"] = str(metadata["ocpVersion"])
    # Lazy %-args: the message is only built when debug logging is enabled.
    logger.debug("metadata %s", metadata)
    return metadata



def set_logging(level, logger):
"""sets log level and format
Expand Down

0 comments on commit 366fc6d

Please sign in to comment.