Commit: update to fmatch 0.0.6 and add changepoint filtering
Signed-off-by: Shashank Reddy Boyapally <[email protected]>
shashank-boyapally committed Mar 28, 2024
1 parent a94f5ee commit ae73a39
Showing 3 changed files with 36 additions and 21 deletions.
28 changes: 20 additions & 8 deletions pkg/daemon.py
@@ -1,6 +1,7 @@
"""
Module to run orion in daemon mode
"""

import logging
import shutil
import os
@@ -15,7 +16,12 @@


 @app.post("/daemon")
-async def daemon(file: UploadFile = File(...), uuid: str = "", baseline: str = ""):
+async def daemon(
+    file: UploadFile = File(...),
+    uuid: str = "",
+    baseline: str = "",
+    filter_changepoints="",
+):
     """starts listening on port 8000 on url /daemon

     Args:
@@ -28,15 +34,21 @@ async def daemon(file: UploadFile = File(...), uuid: str = "", baseline: str = ""):
     new_file_name = f"{file_name}_copy{file_extension}"
     with open(new_file_name, "wb") as buffer:
         shutil.copyfileobj(file.file, buffer)
-    argDict={
-        'config': new_file_name,
-        'output_path': "output.csv",
-        'hunter_analyze': True,
-        'output_format': "json",
-        'uuid':uuid,
-        'baseline':baseline,
+    argDict = {
+        "config": new_file_name,
+        "output_path": "output.csv",
+        "hunter_analyze": True,
+        "output_format": "json",
+        "uuid": uuid,
+        "baseline": baseline,
     }
+    filter_changepoints = (
+        True if filter_changepoints == "true" else False  # pylint: disable = R1719
+    )
     result = runTest.run(**argDict)
+    if filter_changepoints:
+        for key, value in result.items():
+            result[key] = list(filter(lambda x: x.get("is_changepoint", False), value))
     try:
         os.remove(new_file_name)
     except OSError as e:
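The new filter_changepoints parameter arrives as a query string; only the literal value "true" turns filtering on, and the daemon then drops every run not flagged as a changepoint. A minimal client sketch, assuming the app is served locally on port 8000 and that config.yaml is a valid test configuration (host, port, and file name are assumptions, not part of this commit):

# Hypothetical client for the /daemon endpoint; requires the requests package.
import requests

with open("config.yaml", "rb") as config:
    response = requests.post(
        "http://localhost:8000/daemon",
        files={"file": config},                  # bound to the UploadFile parameter
        params={"filter_changepoints": "true"},  # any other value leaves filtering off
    )
# With filtering on, each test maps only to entries with is_changepoint=True.
print(response.json())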
2 changes: 1 addition & 1 deletion pkg/runTest.py
@@ -33,7 +33,7 @@ def run(**kwargs):
         )
         if kwargs["hunter_analyze"]:
             testname, result_data = run_hunter_analyze(
-                result, test, output=kwargs["output_format"], matcher=match
+                result, test, output=kwargs["output_format"]
             )
             result_output[testname] = result_data
     return result_output
27 changes: 15 additions & 12 deletions pkg/utils.py
@@ -16,14 +16,15 @@

 from hunter.report import Report, ReportType
 from hunter.series import Metric, Series
+import pyshorteners

 from pkg.logrus import SingletonLogger




-def run_hunter_analyze(merged_df, test, output, matcher):
+def run_hunter_analyze(merged_df, test, output):
     """Start hunter analyze function

     Args:
@@ -35,15 +36,15 @@ def run_hunter_analyze(merged_df, test, output, matcher):
     metrics = {
         column: Metric(1, 1.0)
         for column in merged_df.columns
-        if column not in ["uuid", "timestamp"]
+        if column not in ["uuid", "timestamp", "buildUrl"]
     }
     data = {
         column: merged_df[column]
         for column in merged_df.columns
-        if column not in ["uuid", "timestamp"]
+        if column not in ["uuid", "timestamp", "buildUrl"]
     }
     attributes = {
-        column: merged_df[column] for column in merged_df.columns if column in ["uuid"]
+        column: merged_df[column] for column in merged_df.columns if column in ["uuid", "buildUrl"]
     }
     series = Series(
         test_name=test["name"],
@@ -63,12 +64,12 @@ def run_hunter_analyze(merged_df, test, output, matcher):

     if output == "json":
         change_points_by_metric = series.analyze().change_points
-        output_json = parse_json_output(merged_df, change_points_by_metric,matcher=matcher)
+        output_json = parse_json_output(merged_df, change_points_by_metric)
         return test["name"], output_json
     return None


-def parse_json_output(merged_df, change_points_by_metric,matcher):
+def parse_json_output(merged_df, change_points_by_metric):
     """json output generator function

     Args:
@@ -84,11 +85,8 @@ def parse_json_output(merged_df, change_points_by_metric,matcher):
     for index, entry in enumerate(df_json):
         entry["metrics"] = {
             key: {"value": entry.pop(key), "percentage_change": 0}
-            for key in entry.keys() - {"uuid", "timestamp"}
+            for key in entry.keys() - {"uuid", "timestamp", "buildUrl"}
         }
-        entry["buildUrl"] = matcher.get_metadata_by_uuid(entry.get("uuid")).get(
-            "buildUrl"
-        )
         entry["is_changepoint"] = False

     for key in change_points_by_metric.keys():
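After this loop each serialized run has roughly the shape sketched below; the values are invented, and percentage_change starts at 0 before the change-point pass that follows:

# Approximate entry shape built by parse_json_output (all values invented).
entry = {
    "uuid": "a1b2c3",
    "timestamp": 1711000000,
    "buildUrl": "https://tinyurl.com/example",
    "metrics": {
        "podReadyLatency": {"value": 10.0, "percentage_change": 0},
    },
    "is_changepoint": False,
}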
@@ -261,7 +259,9 @@ def process_test(test, match, output, uuid, baseline):
     else:
         metadata = filter_metadata(uuid,match)
     logger_instance.info("The test %s has started", test["name"])
-    uuids = match.get_uuid_by_metadata(metadata)
+    runs = match.get_uuid_by_metadata(metadata)
+    uuids = [run["uuid"] for run in runs]
+    buildUrls = {run["uuid"]: run["buildUrl"] for run in runs}
     if baseline in ('', None):
         uuids = match.get_uuid_by_metadata(metadata)
         if len(uuids) == 0:
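The unpacking above encodes the fmatch 0.0.6 API change: judging from this diff, get_uuid_by_metadata now returns a list of run dictionaries carrying both uuid and buildUrl rather than bare UUID strings. A sketch with invented values:

# Return shape assumed by the new code (values invented for illustration).
runs = [
    {"uuid": "a1b2c3", "buildUrl": "https://jenkins.example/job/periodic/1"},
    {"uuid": "d4e5f6", "buildUrl": "https://jenkins.example/job/periodic/2"},
]
uuids = [run["uuid"] for run in runs]                       # ["a1b2c3", "d4e5f6"]
buildUrls = {run["uuid"]: run["buildUrl"] for run in runs}  # uuid -> buildUrl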
@@ -279,7 +279,9 @@ def process_test(test, match, output, uuid, baseline):
         lambda left, right: pd.merge(left, right, on="uuid", how="inner"),
         dataframe_list,
     )
-
+    shortener = pyshorteners.Shortener()
+    merged_df["buildUrl"] = merged_df["uuid"].apply(
+        lambda uuid: shortener.tinyurl.short(buildUrls[uuid]) #pylint: disable = cell-var-from-loop
+    )
     output_file_path = output.split(".")[0] + "-" + test["name"] + ".csv"
     match.save_results(merged_df, csv_file_path=output_file_path)
     return merged_df
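The build URLs are shortened through pyshorteners' TinyURL provider, which calls the public TinyURL service at runtime, so this step needs network access and makes one HTTP request per unique UUID. A minimal sketch (the long URL is invented):

# Minimal pyshorteners sketch; requires network access to tinyurl.com.
import pyshorteners

shortener = pyshorteners.Shortener()
short_url = shortener.tinyurl.short("https://jenkins.example/job/periodic/1")
print(short_url)  # a tinyurl.com link wrapping the build URL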
