fixed logging and added uuid comparison
Signed-off-by: Shashank Reddy Boyapally <[email protected]>
shashank-boyapally committed Mar 21, 2024
1 parent 8e48be2 commit 555ff50
Showing 5 changed files with 127 additions and 46 deletions.
15 changes: 8 additions & 7 deletions orion.py
@@ -10,7 +10,6 @@
from pkg.logrus import SingletonLogger
from pkg.runTest import run

logger_instance = SingletonLogger(debug=False).logger


@click.group()
@@ -35,7 +34,9 @@ def cli(max_content_width=120): # pylint: disable=unused-argument
default="text",
help="Choose output format (json or text)",
)
def cmd_analysis(config, debug, output_path, hunter_analyze, output_format):
@click.option("--uuid", default="", help="UUID to use as base for comparisons")
@click.option("--baseline", default="", help="Baseline UUID(s) to to compare against uuid")
def cmd_analysis(**kwargs):
"""
Orion runs in command-line mode, and helps in detecting regressions
@@ -48,13 +49,13 @@ def cmd_analysis(config, debug, output_path, hunter_analyze, output_format):
output (str): path to the output csv file
hunter_analyze (bool): turns on hunter analysis of gathered uuid(s) data
"""
level = logging.DEBUG if debug else logging.INFO
logger_instance.setLevel(level)
level = logging.DEBUG if kwargs['debug'] else logging.INFO
logger_instance = SingletonLogger(debug=level).logger
logger_instance.info("🏹 Starting Orion in command-line mode")
output = run(config, output_path, hunter_analyze, output_format)
output = run(**kwargs)
for test_name, result_table in output.items():
print(test_name)
print("-"*len(test_name))
print("="*len(test_name))
print(result_table)

csv_name = kwargs["output"].split(".")[0]+"-"+test['name']+".csv"
@@ -70,7 +71,7 @@ def rundaemon(debug)
\b
"""
level = logging.DEBUG if debug else logging.INFO
logger_instance.setLevel(level)
logger_instance = SingletonLogger(debug=level).logger
logger_instance.info("🏹 Starting Orion in Daemon mode")
uvicorn.run("pkg.daemon:app", port=8000)

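For context, a minimal sketch of driving the new comparison flags through click's test runner. The command name, config path, and UUIDs are hypothetical placeholders; it assumes orion.py exposes the cli group shown above and that cmd_analysis is registered under click's default underscore-to-dash naming.

from click.testing import CliRunner

from orion import cli

runner = CliRunner()
result = runner.invoke(
    cli,
    [
        "cmd-analysis",                # assumed name derived from cmd_analysis
        "--config", "examples/config.yaml",                    # hypothetical config path
        "--uuid", "aaaaaaaa-0000-0000-0000-000000000000",      # run under test
        "--baseline", "bbbbbbbb-1111-1111-1111-111111111111",  # baseline run(s)
        "--hunter-analyze",            # assumed flag form of hunter_analyze
    ],
)
print(result.output)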
15 changes: 12 additions & 3 deletions pkg/daemon.py
@@ -1,6 +1,7 @@
"""
Module to run orion in daemon mode
"""
import logging
import shutil
import os

@@ -10,11 +11,11 @@
from . import runTest

app = FastAPI()
logger_instance = SingletonLogger(debug=False).logger
logger_instance = SingletonLogger(debug=logging.INFO).logger


@app.post("/daemon")
async def daemon(file: UploadFile = File(...)):
async def daemon(file: UploadFile = File(...), uuid: str = "", baseline: str = ""):
"""starts listening on port 8000 on url /daemon
Args:
@@ -27,7 +28,15 @@ async def daemon(file: UploadFile = File(...)):
new_file_name = f"{file_name}_copy{file_extension}"
with open(new_file_name, "wb") as buffer:
shutil.copyfileobj(file.file, buffer)
result = runTest.run(new_file_name, "output.csv", True, "json")
argDict={
'config': new_file_name,
'output_path': "output.csv",
'hunter_analyze': True,
'output_format': "json",
'uuid':uuid,
'baseline':baseline,
}
result = runTest.run(**argDict)
try:
os.remove(new_file_name)
except OSError as e:
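A hedged sketch of calling the updated endpoint, assuming the daemon is running locally on port 8000 via the uvicorn.run call shown in orion.py; the file name and UUIDs are placeholders. Because uuid and baseline are plain string parameters on the route, FastAPI reads them as query parameters, while the config travels as the uploaded file.

import requests

with open("config.yaml", "rb") as f:  # hypothetical config file
    resp = requests.post(
        "http://localhost:8000/daemon",
        params={
            "uuid": "aaaaaaaa-0000-0000-0000-000000000000",      # run under test
            "baseline": "bbbbbbbb-1111-1111-1111-111111111111",  # baseline run(s)
        },
        files={"file": ("config.yaml", f)},  # becomes the UploadFile argument
    )
print(resp.json())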
11 changes: 7 additions & 4 deletions pkg/logrus.py
@@ -1,32 +1,35 @@
"""
Logger for orion
"""

import logging
import sys


class SingletonLogger:
"""Singleton logger to set logging at one single place
Returns:
_type_: _description_
"""

_instance = None

def __new__(cls, debug=False):
def __new__(cls, debug):
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance._logger = cls._initialize_logger(debug)
return cls._instance

@staticmethod
def _initialize_logger(debug):
level = logging.DEBUG if debug else logging.INFO
level = debug # if debug else logging.INFO
logger = logging.getLogger("Orion")
logger.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
formatter = logging.Formatter(
"%(asctime)s - %(filename)s-%(lineno)d - %(name)s - %(levelname)s - %(message)s"
"%(asctime)s - %(name)s - %(levelname)s - file: %(filename)s - line: %(lineno)d - %(message)s" # pylint: disable = line-too-long
)
handler.setFormatter(formatter)
logger.addHandler(handler)
@@ -39,4 +42,4 @@ def logger(self):
Returns:
_type_: _description_
"""
return self._logger # pylint: disable = no-member
return self._logger # pylint: disable = no-member
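Since the constructor now takes a logging level in place of a boolean, a quick usage sketch; note the caveat visible in __new__ above: the first construction fixes the level, and later calls simply return the cached instance.

import logging

from pkg.logrus import SingletonLogger

log = SingletonLogger(debug=logging.DEBUG).logger  # first call creates the logger
log.debug("visible: logger was created at DEBUG")

same = SingletonLogger(debug=logging.INFO).logger  # level argument ignored here
assert same is log                                 # same underlying logger object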
34 changes: 18 additions & 16 deletions pkg/runTest.py
@@ -1,37 +1,39 @@
"""
run test
"""

import logging
from fmatch.matcher import Matcher
from pkg.logrus import SingletonLogger
from pkg.utils import (
run_hunter_analyze,
load_config,
get_es_url,
process_test
)
from pkg.utils import run_hunter_analyze, load_config, get_es_url, process_test

logger_instance= SingletonLogger().logger

def run(config, output_path, hunter_analyze,output_format):
def run(**kwargs):
"""run method to start the tests
Args:
config (_type_): file path to config file
debug (_type_): debug to be true or false
output_path (_type_): output path to save the data
hunter_analyze (_type_): changepoint detection through hunter. defaults to True
output_format (_type_): output to be table or json
Returns:
_type_: _description_
"""
data = load_config(config, logger_instance)
ES_URL = get_es_url(data,logger=logger_instance)
logger_instance = SingletonLogger(debug=logging.INFO).logger
data = load_config(kwargs["config"])
ES_URL = get_es_url(data)
result_output = {}
for test in data["tests"]:
match = Matcher(index="perf_scale_ci",level=logger_instance.level, ES_URL=ES_URL)
result = process_test(test, match, logger_instance, output_path)
if hunter_analyze:
testname,result_data=run_hunter_analyze(result, test,output=output_format,matcher=match)
result_output[testname]=result_data
match = Matcher(
index="perf_scale_ci", level=logger_instance.level, ES_URL=ES_URL
)
result = process_test(
test, match, kwargs["output_path"], kwargs["uuid"], kwargs["baseline"]
)
if kwargs["hunter_analyze"]:
testname, result_data = run_hunter_analyze(
result, test, output=kwargs["output_format"], matcher=match
)
result_output[testname] = result_data
return result_output
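For reference, a minimal keyword set that satisfies the new run(**kwargs) signature, mirroring the argDict built in pkg/daemon.py; the config path is a placeholder.

from pkg import runTest

results = runTest.run(
    config="config.yaml",     # placeholder path to a test config
    output_path="output.csv",
    hunter_analyze=True,
    output_format="json",
    uuid="",                  # empty: derive match metadata from the config
    baseline="",              # empty: gather historical runs instead of comparing
)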
98 changes: 82 additions & 16 deletions pkg/utils.py
@@ -6,7 +6,9 @@

from functools import reduce
import json
import logging
import os
import re
import sys

import yaml
@@ -15,6 +17,11 @@
from hunter.report import Report, ReportType
from hunter.series import Metric, Series

from pkg.logrus import SingletonLogger





def run_hunter_analyze(merged_df, test, output, matcher):
"""Start hunter analyze function
@@ -71,7 +78,6 @@ def parse_json_output(merged_df, change_points_by_metric,matcher):
Returns:
_type_: _description_
"""

df_json = merged_df.to_json(orient="records")
df_json = json.loads(df_json)

@@ -99,7 +105,7 @@ def parse_json_output(merged_df, change_points_by_metric,matcher):


# pylint: disable=too-many-locals
def get_metric_data(ids, index, metrics, match, logger):
def get_metric_data(ids, index, metrics, match):
"""Gets details metrics basked on metric yaml list
Args:
@@ -112,6 +118,7 @@ def get_metric_data(ids, index, metrics, match, logger):
Returns:
dataframe_list: dataframe of the all metrics
"""
logger= SingletonLogger(debug=logging.INFO).logger
dataframe_list = []
for metric in metrics:
metric_name = metric["name"]
@@ -152,7 +159,7 @@
return dataframe_list


def get_metadata(test, logger):
def get_metadata(test):
"""Gets metadata of the run from each test
Args:
@@ -161,13 +168,14 @@
Returns:
dict: dictionary of the metadata
"""
logger= SingletonLogger(debug=logging.INFO).logger
metadata = test["metadata"]
metadata["ocpVersion"] = str(metadata["ocpVersion"])
logger.debug("metadata" + str(metadata))
return metadata


def load_config(config, logger):
def load_config(config):
"""Loads config file
Args:
@@ -177,6 +185,7 @@
Returns:
dict: dictionary of the config file
"""
logger= SingletonLogger(debug=logging.INFO).logger
try:
with open(config, "r", encoding="utf-8") as file:
data = yaml.safe_load(file)
@@ -190,7 +199,7 @@
return data


def get_es_url(data, logger):
def get_es_url(data):
"""Gets es url from config or env
Args:
@@ -200,6 +209,7 @@
Returns:
str: es url
"""
logger= SingletonLogger(debug=logging.INFO).logger
if "ES_SERVER" in data.keys():
return data["ES_SERVER"]
if "ES_SERVER" in os.environ:
@@ -208,7 +218,7 @@
sys.exit(1)


def get_index_and_ids(metadata, uuids, match):
def get_index_and_ids(metadata, uuids, match, baseline):
"""returns the index to be used and runs as uuids
Args:
@@ -224,11 +234,15 @@
if metadata["benchmark.keyword"] == "ingress-perf":
return "ingress-performance", uuids
index = "ripsaw-kube-burner"
runs = match.match_kube_burner(uuids)
return index, match.filter_runs(runs, runs)
if baseline == "":
runs = match.match_kube_burner(uuids)
ids = match.filter_runs(runs, runs)
else:
ids = uuids
return index, ids


def process_test(test, match, logger, output):
def process_test(test, match, output, uuid, baseline):
"""generate the dataframe for the test given
Args:
@@ -240,17 +254,25 @@
Returns:
_type_: merged dataframe
"""
metadata = get_metadata(test, logger)
logger= SingletonLogger(debug=logging.INFO).logger
if uuid in ('', None):
metadata = get_metadata(test)
else:
metadata = filter_metadata(uuid,match)
logger.info("The test %s has started", test["name"])
uuids = match.get_uuid_by_metadata(metadata)
if len(uuids) == 0:
print("No UUID present for given metadata")
sys.exit()

index, ids = get_index_and_ids(metadata, uuids, match)
if baseline in ('', None):
uuids = match.get_uuid_by_metadata(metadata)
if len(uuids) == 0:
logger.error("No UUID present for given metadata")
sys.exit()
else:
uuids = re.split(' |,',baseline)
uuids.append(uuid)
index, ids = get_index_and_ids(metadata, uuids, match, baseline)

metrics = test["metrics"]
dataframe_list = get_metric_data(ids, index, metrics, match, logger)
dataframe_list = get_metric_data(ids, index, metrics, match)

merged_df = reduce(
lambda left, right: pd.merge(left, right, on="uuid", how="inner"),
Expand All @@ -260,3 +282,47 @@ def process_test(test, match, logger, output):
output_file_path = output.split(".")[0] + "-" + test["name"] + ".csv"
match.save_results(merged_df, csv_file_path=output_file_path)
return merged_df

def filter_metadata(uuid,match):
"""Gets metadata of the run from each test
Args:
uuid (str): str of uuid to find metadata of
match: the fmatch instance
Returns:
dict: dictionary of the metadata
"""
logger= SingletonLogger(debug=logging.INFO).logger
test = match.get_metadata_by_uuid(uuid)
metadata = {
'platform': '',
'clusterType': '',
'masterNodesCount': 0,
'workerNodesCount': 0,
'infraNodesCount': 0,
'masterNodesType': '',
'workerNodesType': '',
'infraNodesType': '',
'totalNodesCount': 0,
'ocpVersion': '',
'networkType': '',
'ipsec': '',
'fips': '',
'encrypted': '',
'publish': '',
'computeArch': '',
'controlPlaneArch': ''
}
for k,v in test.items():
if k not in metadata:
continue
metadata[k] = v
metadata['benchmark.keyword'] = test['benchmark']
metadata["ocpVersion"] = str(metadata["ocpVersion"])

#Remove any keys that have blank values
no_blank_meta = {k: v for k, v in metadata.items() if v}
logger.debug('No blank metadata dict: ' + str(no_blank_meta))
return no_blank_meta
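Since process_test splits the baseline string with re.split(' |,', baseline), commas and spaces both work as separators; a small worked sketch with placeholder UUIDs:

import re

baseline = "bbbb-1111,cccc-2222 dddd-3333"  # placeholder baseline UUIDs
uuid = "aaaa-0000"                          # placeholder run under test

uuids = re.split(" |,", baseline)  # -> ['bbbb-1111', 'cccc-2222', 'dddd-3333']
uuids.append(uuid)                 # the run under test is appended last
print(uuids)                       # the list handed to get_index_and_ids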
