From f1168499261f613cb0763a2444a1c72dd7d79859 Mon Sep 17 00:00:00 2001
From: Vishnu Challa
Date: Sat, 22 Jun 2024 11:18:56 -0400
Subject: [PATCH 1/2] Converting thresholds to lines in telco plotly visuals

Signed-off-by: Vishnu Challa
---
 .../app/api/v1/endpoints/telco/telcoGraphs.py | 108 +++++++++---------
 backend/app/services/splunk.py                |   2 -
 2 files changed, 54 insertions(+), 56 deletions(-)

diff --git a/backend/app/api/v1/endpoints/telco/telcoGraphs.py b/backend/app/api/v1/endpoints/telco/telcoGraphs.py
index e4761534..0ec1b93e 100644
--- a/backend/app/api/v1/endpoints/telco/telcoGraphs.py
+++ b/backend/app/api/v1/endpoints/telco/telcoGraphs.py
@@ -25,7 +25,7 @@ async def process_json(json_data: dict):
 
 def process_ptp(json_data: str):
     nic = json_data["nic"]
-    ptp4l_max_offset = json_data["ptp4l_max_offset"]
+    ptp4l_max_offset = json_data.get("ptp4l_max_offset", 0)
     if "mellanox" in nic.lower():
         defined_offset_threshold = 200
     else:
@@ -38,8 +38,8 @@ def process_ptp(json_data: str):
         "ptp": [
             {
                 "name": "Data Points",
-                "x": ["ptp4l_max_offset"],
-                "y": [ptp4l_max_offset],
+                "x": ["-inf", "ptp4l_max_offset", "inf"],
+                "y": [0, ptp4l_max_offset, 0],
                 "mode": "markers",
                 "marker": {
                     "size": 10,
@@ -47,16 +47,16 @@ def process_ptp(json_data: str):
                 "error_y": {
                     "type": "data",
                     "symmetric": "false",
-                    "array": [0],
-                    "arrayminus": [minus_offset]
+                    "array": [0, 0, 0],
+                    "arrayminus": [0, minus_offset, 0]
                 },
 
             },
             {
                 "name": "Threshold",
-                "x": ["ptp4l_max_offset"],
-                "y": [defined_offset_threshold],
-                "mode": "lines+markers",
+                "x": ["-inf", "ptp4l_max_offset", "inf"],
+                "y": [defined_offset_threshold, defined_offset_threshold, defined_offset_threshold],
+                "mode": "lines",
                 "line": {
                     "dash": 'dot',
                     "width": 3,
@@ -78,8 +78,8 @@ def process_reboot(json_data: str):
     defined_threshold = 20
     reboot_type = json_data["reboot_type"]
     for each_iteration in json_data["Iterations"]:
-        max_minutes = max(max_minutes, each_iteration["total_minutes"])
-        avg_minutes += each_iteration["total_minutes"]
+        max_minutes = max(max_minutes, each_iteration.get("total_minutes", 0))
+        avg_minutes += each_iteration.get("total_minutes", 0)
     avg_minutes /= len(json_data["Iterations"])
     if max_minutes > defined_threshold:
         minus_max_minutes = max_minutes - defined_threshold
@@ -108,7 +108,7 @@ def process_reboot(json_data: str):
                 "name": "Threshold",
                 "x": [reboot_type + "_" + "max_minutes", reboot_type + "_" + "avg_minutes"],
                 "y": [defined_threshold, defined_threshold],
-                "mode": "lines+markers",
+                "mode": "lines",
                 "marker": {
                     "size": 15,
                 },
@@ -131,9 +131,9 @@ def process_cpu_util(json_data: str):
         if each_scenario["scenario_name"] == "steadyworkload":
             for each_type in each_scenario["types"]:
                 if each_type["type_name"] == "total":
-                    total_max_cpu = each_type["max_cpu"]
+                    total_max_cpu = each_type.get("max_cpu", 0)
                     break
-            total_avg_cpu = each_scenario["avg_cpu_total"]
+            total_avg_cpu = each_scenario.get("avg_cpu_total", 0)
             break
     if total_max_cpu > defined_threshold:
         minus_max_cpu = total_max_cpu - defined_threshold
@@ -162,7 +162,7 @@ def process_cpu_util(json_data: str):
                 "name": "Threshold",
                 "x": ["total_max_cpu", "total_avg_cpu"],
                 "y": [defined_threshold, defined_threshold],
-                "mode": "lines+markers",
+                "mode": "lines",
                 "marker": {
                     "size": 15,
                 },
@@ -176,7 +176,7 @@
     }
 
 def process_rfc_2544(json_data: str):
-    max_delay = json_data["max_delay"]
+    max_delay = json_data.get("max_delay", 0)
     defined_delay_threshold = 30.0
     minus_max_delay = 0.0
     if max_delay > defined_delay_threshold:
@@ -185,8 +185,8 @@ def process_rfc_2544(json_data: str):
     return {
         "rfc-2544": [
             {
-                "x": ["max_delay"],
-                "y": [max_delay],
+                "x": ["-inf", "max_delay", "inf"],
+                "y": [0, max_delay, 0],
                 "mode": "markers",
                 "marker": {
                     "size": 10,
@@ -195,16 +195,16 @@ def process_rfc_2544(json_data: str):
                 "error_y": {
                     "type": "data",
                     "symmetric": "false",
-                    "array": [0],
-                    "arrayminus": [minus_max_delay]
+                    "array": [0, 0, 0],
+                    "arrayminus": [0, minus_max_delay, 0]
                 },
                 "type": "scatter",
             },
             {
-                "x": ["max_delay"],
-                "y": [defined_delay_threshold],
+                "x": ["-inf", "max_delay", "inf"],
+                "y": [defined_delay_threshold, defined_delay_threshold, defined_delay_threshold],
                 "name": "Threshold",
-                "mode": "lines+markers",
+                "mode": "lines",
                 "marker": {
                     "size": 15,
                 },
@@ -228,8 +228,8 @@ def process_cyclictest(json_data: str):
     }
 
 def process_deployment(json_data: str):
-    total_minutes = json_data["total_minutes"]
-    reboot_count = json_data["reboot_count"]
+    total_minutes = json_data.get("total_minutes", 0)
+    reboot_count = json_data.get("reboot_count", 0)
     defined_total_minutes_threshold = 180
     defined_total_reboot_count = 3
     minus_total_minutes = 0.0
@@ -244,8 +244,8 @@ def process_deployment(json_data: str):
         "total_minutes": [
             {
                 "name": "Data Points",
-                "x": ["total_minutes"],
-                "y": [total_minutes],
+                "x": ["-inf", "total_minutes", "inf"],
+                "y": [0, total_minutes, 0],
                 "mode": "markers",
                 "marker": {
                     "size": 10,
@@ -253,16 +253,16 @@ def process_deployment(json_data: str):
                 "error_y": {
                     "type": "data",
                     "symmetric": "false",
-                    "array": [0],
-                    "arrayminus": [minus_total_minutes]
+                    "array": [0, 0, 0],
+                    "arrayminus": [0, minus_total_minutes, 0]
                 },
                 "type": "scatter",
             },
             {
                 "name": "Threshold",
-                "x": ["total_minutes"],
-                "y": [defined_total_minutes_threshold],
-                "mode": "lines+markers",
+                "x": ["-inf", "total_minutes", "inf"],
+                "y": [defined_total_minutes_threshold, defined_total_minutes_threshold, defined_total_minutes_threshold],
+                "mode": "lines",
                 "marker": {
                     "size": 15,
                 },
@@ -276,8 +276,8 @@ def process_deployment(json_data: str):
         "total_reboot_count": [
             {
                 "name": "Data Points",
-                "x": ["reboot_count"],
-                "y": [reboot_count],
+                "x": ["-inf", "reboot_count", "inf"],
+                "y": [0, reboot_count, 0],
                 "mode": "markers",
                 "marker": {
                     "size": 10,
@@ -285,16 +285,16 @@ def process_deployment(json_data: str):
                 "error_y": {
                     "type": "data",
                     "symmetric": "false",
-                    "array": [0],
-                    "arrayminus": [minus_total_reboot_count]
+                    "array": [0, 0, 0],
+                    "arrayminus": [0, minus_total_reboot_count, 0]
                 },
                 "type": "scatter",
             },
             {
                 "name": "Threshold",
-                "x": ["reboot_count"],
-                "y": [defined_total_reboot_count],
-                "mode": "lines+markers",
+                "x": ["-inf", "reboot_count", "inf"],
+                "y": [defined_total_reboot_count, defined_total_reboot_count, defined_total_reboot_count],
+                "mode": "lines",
                 "marker": {
                     "size": 15,
                 },
@@ -315,8 +315,8 @@ def get_oslat_or_cyclictest(json_data: str):
     defined_latency_threshold = 20
     defined_number_of_nines_threshold = 100
     for each_test_unit in json_data["test_units"]:
-        max_latency = max(max_latency, each_test_unit["max_latency"])
-        min_number_of_nines = min(min_number_of_nines, each_test_unit["number_of_nines"])
+        max_latency = max(max_latency, each_test_unit.get("max_latency", 0))
+        min_number_of_nines = min(min_number_of_nines, each_test_unit.get("number_of_nines", 0))
 
     if max_latency > defined_latency_threshold:
         minus_max_latency = max_latency - defined_latency_threshold
@@ -324,8 +324,8 @@ def get_oslat_or_cyclictest(json_data: str):
         "number_of_nines": [
             {
                 "name": "Data Points",
-                "x": ["min_number_of_nines"],
-                "y": [min_number_of_nines],
+                "x": ["-inf", "min_number_of_nines", "inf"],
+                "y": [0, min_number_of_nines, 0],
                 "mode": "markers",
                 "marker": {
                     "size": 10,
@@ -333,16 +333,16 @@ def get_oslat_or_cyclictest(json_data: str):
                 "error_y": {
                     "type": "data",
                     "symmetric": "false",
-                    "array": [0],
-                    "arrayminus": [min_number_of_nines - defined_number_of_nines_threshold]
+                    "array": [0, 0, 0],
+                    "arrayminus": [0, min_number_of_nines - defined_number_of_nines_threshold, 0]
                 },
                 "type": "scatter",
             },
             {
                 "name": "Threshold",
-                "x": ["min_number_of_nines"],
-                "y": [defined_number_of_nines_threshold],
-                "mode": "lines+markers",
+                "x": ["-inf", "min_number_of_nines", "inf"],
+                "y": [defined_number_of_nines_threshold, defined_number_of_nines_threshold, defined_number_of_nines_threshold],
+                "mode": "lines",
                 "marker": {
                     "size": 15,
                 },
@@ -356,8 +356,8 @@ def get_oslat_or_cyclictest(json_data: str):
         "max_latency": [
             {
                 "name": "Data Points",
-                "x": ["max_latency"],
-                "y": [max_latency],
+                "x": ["-inf", "max_latency", "inf"],
+                "y": [0, max_latency, 0],
                 "mode": "markers",
                 "marker": {
                     "size": 10,
@@ -365,16 +365,16 @@ def get_oslat_or_cyclictest(json_data: str):
                 "error_y": {
                     "type": "data",
                     "symmetric": "false",
-                    "array": [0],
-                    "arrayminus": [minus_max_latency]
+                    "array": [0, 0, 0],
+                    "arrayminus": [0, minus_max_latency, 0]
                 },
                 "type": "scatter",
             },
             {
                 "name": "Threshold",
-                "x": ["max_latency"],
-                "y": [defined_latency_threshold],
-                "mode": "lines+markers",
+                "x": ["-inf", "max_latency", "inf"],
+                "y": [defined_latency_threshold, defined_latency_threshold, defined_latency_threshold],
+                "mode": "lines",
                 "marker": {
                     "size": 15,
                 },
diff --git a/backend/app/services/splunk.py b/backend/app/services/splunk.py
index 0d25c25b..aea7efb0 100644
--- a/backend/app/services/splunk.py
+++ b/backend/app/services/splunk.py
@@ -1,7 +1,5 @@
 import orjson
 
 from app import config
-from multiprocessing import Pool, cpu_count
-from concurrent.futures import ProcessPoolExecutor, as_completed
 from splunklib import client, results
 
From 3c9fe8e2ad4d831f193669ba2c2e27ccdae2131f Mon Sep 17 00:00:00 2001
From: Vishnu Challa
Date: Sat, 22 Jun 2024 13:21:43 -0400
Subject: [PATCH 2/2] Updating job status based on threshold

Signed-off-by: Vishnu Challa
---
 backend/app/api/v1/commons/telco.py             |   4 +-
 backend/app/api/v1/endpoints/quay/quayJobs.py   |   2 +-
 .../app/api/v1/endpoints/telco/telcoGraphs.py   | 531 +++++++++---------
 backend/app/api/v1/endpoints/telco/telcoJobs.py |   2 +-
 4 files changed, 278 insertions(+), 261 deletions(-)

diff --git a/backend/app/api/v1/commons/telco.py b/backend/app/api/v1/commons/telco.py
index 49656b9a..6f4f9a8d 100644
--- a/backend/app/api/v1/commons/telco.py
+++ b/backend/app/api/v1/commons/telco.py
@@ -5,6 +5,7 @@
 import app.api.v1.commons.hasher as hasher
 from datetime import datetime, timezone
 import app.api.v1.commons.utils as utils
+import app.api.v1.endpoints.telco.telcoGraphs as telcoGraphs
 
 
 async def getData(start_datetime: date, end_datetime: date, configpath: str):
@@ -36,6 +37,7 @@ async def getData(start_datetime: date, end_datetime: date, configpath: str):
     for each_response in response:
         end_timestamp = int(each_response['timestamp'])
         test_data = each_response['data']
+        threshold = await telcoGraphs.process_json(test_data, True)
         hash_digest, encrypted_data = hasher.hash_encrypt_json(each_response)
         execution_time_seconds = test_type_execution_times.get(test_data['test_type'], 0)
         start_timestamp = end_timestamp - execution_time_seconds
@@ -58,7 +60,7 @@ async def getData(start_datetime: date, end_datetime: date, configpath: str):
             "startDate": str(start_time_utc),
             "endDate": str(end_time_utc),
             "buildUrl": jenkins_url + "/" + str(test_data['cluster_artifacts']['ref']['jenkins_build']),
-            "jobStatus": "success",
+            "jobStatus": "failure" if (threshold != 0) else "success",
             "jobDuration": execution_time_seconds,
         })
 
diff --git a/backend/app/api/v1/endpoints/quay/quayJobs.py b/backend/app/api/v1/endpoints/quay/quayJobs.py
index 5d57a919..e3e8bf22 100644
--- a/backend/app/api/v1/endpoints/quay/quayJobs.py
+++ b/backend/app/api/v1/endpoints/quay/quayJobs.py
@@ -25,7 +25,7 @@ async def jobs(start_date: date = Query(None, description="Start date for search
                pretty: bool = Query(False, description="Output contet in pretty format.")):
     if start_date is None:
         start_date = datetime.utcnow().date()
-        start_date = start_date - timedelta(days=7)
+        start_date = start_date - timedelta(days=5)
 
     if end_date is None:
         end_date = datetime.utcnow().date()
diff --git a/backend/app/api/v1/endpoints/telco/telcoGraphs.py b/backend/app/api/v1/endpoints/telco/telcoGraphs.py
index 0ec1b93e..41b90cc1 100644
--- a/backend/app/api/v1/endpoints/telco/telcoGraphs.py
+++ b/backend/app/api/v1/endpoints/telco/telcoGraphs.py
@@ -8,9 +8,9 @@ async def graph(uuid: str, encryptedData: str):
     bytesData = encryptedData.encode("utf-8")
     decrypted_data = hasher.decrypt_unhash_json(uuid, bytesData)
     json_data = decrypted_data["data"]
-    return await process_json(json_data)
+    return await process_json(json_data, False)
 
-async def process_json(json_data: dict):
+async def process_json(json_data: dict, is_row: bool):
     function_mapper = {
         "ptp": process_ptp,
         "oslat": process_oslat,
@@ -21,9 +21,9 @@ async def process_json(json_data: dict):
         "deployment": process_deployment,
     }
     mapped_function = function_mapper.get(json_data["test_type"])
-    return mapped_function(json_data)
+    return mapped_function(json_data, is_row)
 
-def process_ptp(json_data: str):
+def process_ptp(json_data: str, is_row: bool):
     nic = json_data["nic"]
     ptp4l_max_offset = json_data.get("ptp4l_max_offset", 0)
     if "mellanox" in nic.lower():
@@ -34,43 +34,45 @@ def process_ptp(json_data: str):
     if ptp4l_max_offset > defined_offset_threshold:
         minus_offset = ptp4l_max_offset - defined_offset_threshold
 
-    return {
-        "ptp": [
-            {
-                "name": "Data Points",
-                "x": ["-inf", "ptp4l_max_offset", "inf"],
-                "y": [0, ptp4l_max_offset, 0],
-                "mode": "markers",
-                "marker": {
-                    "size": 10,
-                },
-                "error_y": {
-                    "type": "data",
-                    "symmetric": "false",
-                    "array": [0, 0, 0],
-                    "arrayminus": [0, minus_offset, 0]
-                },
-
-            },
-            {
-                "name": "Threshold",
-                "x": ["-inf", "ptp4l_max_offset", "inf"],
-                "y": [defined_offset_threshold, defined_offset_threshold, defined_offset_threshold],
-                "mode": "lines",
-                "line": {
-                    "dash": 'dot',
-                    "width": 3,
-                },
-                "marker": {
-                    "size": 15,
-                },
-                "type": "scatter",
-            }
-        ]
-    }
+    if is_row:
+        return minus_offset
+    else:
+        return {
+            "ptp": [
+                {
+                    "name": "Data Points",
+                    "x": ["-inf", "ptp4l_max_offset", "inf"],
+                    "y": [0, ptp4l_max_offset, 0],
+                    "mode": "markers",
+                    "marker": {
+                        "size": 10,
+                    },
+                    "error_y": {
+                        "type": "data",
+                        "symmetric": "false",
+                        "array": [0, 0, 0],
+                        "arrayminus": [0, minus_offset, 0]
+                    },
+                },
+                {
+                    "name": "Threshold",
+                    "x": ["-inf", "ptp4l_max_offset", "inf"],
+                    "y": [defined_offset_threshold, defined_offset_threshold, defined_offset_threshold],
+                    "mode": "lines",
+                    "line": {
+                        "dash": 'dot',
+                        "width": 3,
+                    },
+                    "marker": {
+                        "size": 15,
+                    },
+                    "type": "scatter",
+                }
+            ]
+        }
 
 
-def process_reboot(json_data: str):
+def process_reboot(json_data: str, is_row: bool):
     max_minutes = 0.0
     avg_minutes = 0.0
     minus_max_minutes = 0.0
@@ -85,43 +87,46 @@ def process_reboot(json_data: str):
         minus_max_minutes = max_minutes - defined_threshold
     if avg_minutes > defined_threshold:
         minus_avg_minutes = avg_minutes - defined_threshold
-
-    return {
-        "reboot": [
-            {
-                "name": "Data Points",
-                "x": [reboot_type + "_" + "max_minutes", reboot_type + "_" + "avg_minutes"],
-                "y": [max_minutes, avg_minutes],
-                "mode": "markers",
-                "marker": {
-                    "size": 10,
-                },
-                "error_y": {
-                    "type": "data",
-                    "symmetric": "false",
-                    "array": [0, 0],
-                    "arrayminus": [minus_max_minutes, minus_avg_minutes]
-                },
-                "type": "scatter",
-            },
-            {
-                "name": "Threshold",
-                "x": [reboot_type + "_" + "max_minutes", reboot_type + "_" + "avg_minutes"],
-                "y": [defined_threshold, defined_threshold],
-                "mode": "lines",
-                "marker": {
-                    "size": 15,
-                },
-                "line": {
-                    "dash": "dot",
-                    "width": 3,
-                },
-                "type": "scatter",
-            }
-        ]
-    }
+
+    if is_row:
+        return 1 if (minus_avg_minutes != 0 or minus_max_minutes != 0) else 0
+    else:
+        return {
+            "reboot": [
+                {
+                    "name": "Data Points",
+                    "x": [reboot_type + "_" + "max_minutes", reboot_type + "_" + "avg_minutes"],
+                    "y": [max_minutes, avg_minutes],
+                    "mode": "markers",
+                    "marker": {
+                        "size": 10,
+                    },
+                    "error_y": {
+                        "type": "data",
+                        "symmetric": "false",
+                        "array": [0, 0],
+                        "arrayminus": [minus_max_minutes, minus_avg_minutes]
+                    },
+                    "type": "scatter",
+                },
+                {
+                    "name": "Threshold",
+                    "x": [reboot_type + "_" + "max_minutes", reboot_type + "_" + "avg_minutes"],
+                    "y": [defined_threshold, defined_threshold],
+                    "mode": "lines",
+                    "marker": {
+                        "size": 15,
+                    },
+                    "line": {
+                        "dash": "dot",
+                        "width": 3,
+                    },
+                    "type": "scatter",
+                }
+            ]
+        }
 
-def process_cpu_util(json_data: str):
+def process_cpu_util(json_data: str, is_row: bool):
     total_max_cpu = 0.0
     total_avg_cpu = 0.0
     minus_max_cpu = 0.0
@@ -139,113 +144,16 @@ def process_cpu_util(json_data: str):
         minus_max_cpu = total_max_cpu - defined_threshold
     if total_avg_cpu > defined_threshold:
         minus_avg_cpu = total_avg_cpu - defined_threshold
-
-    return {
-        "cpu_util": [
-            {
-                "name": "Data Points",
-                "x": ["total_max_cpu", "total_avg_cpu"],
-                "y": [total_max_cpu, total_avg_cpu],
-                "mode": "markers",
-                "marker": {
-                    "size": 10,
-                },
-                "error_y": {
-                    "type": "data",
-                    "symmetric": "false",
-                    "array": [0, 0],
-                    "arrayminus": [minus_max_cpu, minus_avg_cpu]
-                },
-                "type": "scatter",
-            },
-            {
-                "name": "Threshold",
-                "x": ["total_max_cpu", "total_avg_cpu"],
-                "y": [defined_threshold, defined_threshold],
-                "mode": "lines",
-                "marker": {
-                    "size": 15,
-                },
-                "line": {
-                    "dash": "dot",
-                    "width": 3,
-                },
-                "type": "scatter",
-            }
-        ]
-    }
-
-def process_rfc_2544(json_data: str):
-    max_delay = json_data.get("max_delay", 0)
-    defined_delay_threshold = 30.0
-    minus_max_delay = 0.0
-    if max_delay > defined_delay_threshold:
-        minus_max_delay = max_delay - defined_delay_threshold
-
-    return {
-        "rfc-2544": [
-            {
-                "x": ["-inf", "max_delay", "inf"],
-                "y": [0, max_delay, 0],
-                "mode": "markers",
-                "marker": {
-                    "size": 10,
-                },
-                "name": "Data Points",
-                "error_y": {
-                    "type": "data",
-                    "symmetric": "false",
-                    "array": [0, 0, 0],
-                    "arrayminus": [0, minus_max_delay, 0]
-                },
-                "type": "scatter",
-            },
-            {
-                "x": ["-inf", "max_delay", "inf"],
-                "y": [defined_delay_threshold, defined_delay_threshold, defined_delay_threshold],
-                "name": "Threshold",
-                "mode": "lines",
-                "marker": {
-                    "size": 15,
-                },
-                "line": {
-                    "dash": "dot",
-                    "width": 3,
-                },
-                "type": "scatter"
-            }
-        ]
-    }
-
-def process_oslat(json_data: str):
-    return {
-        "oslat": get_oslat_or_cyclictest(json_data)
-    }
-
-def process_cyclictest(json_data: str):
-    return {
-        "cyclictest": get_oslat_or_cyclictest(json_data)
-    }
-
-def process_deployment(json_data: str):
-    total_minutes = json_data.get("total_minutes", 0)
-    reboot_count = json_data.get("reboot_count", 0)
-    defined_total_minutes_threshold = 180
-    defined_total_reboot_count = 3
-    minus_total_minutes = 0.0
-    minus_total_reboot_count = 0.0
-    if total_minutes > defined_total_minutes_threshold:
-        minus_total_minutes = total_minutes - defined_total_minutes_threshold
-    if reboot_count > defined_total_reboot_count:
-        minus_total_reboot_count = reboot_count - defined_total_reboot_count
-    return {
-        "deployment": {
-            "total_minutes": [
+
+    if is_row:
+        return 1 if (minus_avg_cpu != 0 or minus_max_cpu != 0) else 0
+    else:
+        return {
+            "cpu_util": [
                 {
                     "name": "Data Points",
-                    "x": ["-inf", "total_minutes", "inf"],
-                    "y": [0, total_minutes, 0],
+                    "x": ["total_max_cpu", "total_avg_cpu"],
+                    "y": [total_max_cpu, total_avg_cpu],
                     "mode": "markers",
                     "marker": {
                         "size": 10,
@@ -253,15 +161,15 @@ def process_deployment(json_data: str):
                     "error_y": {
                         "type": "data",
                         "symmetric": "false",
-                        "array": [0, 0, 0],
-                        "arrayminus": [0, minus_total_minutes, 0]
+                        "array": [0, 0],
+                        "arrayminus": [minus_max_cpu, minus_avg_cpu]
                     },
                     "type": "scatter",
                 },
                 {
                     "name": "Threshold",
-                    "x": ["-inf", "total_minutes", "inf"],
-                    "y": [defined_total_minutes_threshold, defined_total_minutes_threshold, defined_total_minutes_threshold],
+                    "x": ["total_max_cpu", "total_avg_cpu"],
+                    "y": [defined_threshold, defined_threshold],
                     "mode": "lines",
                     "marker": {
                         "size": 15,
@@ -272,28 +180,41 @@ def process_deployment(json_data: str):
                     },
                     "type": "scatter",
                 }
-            ],
-            "total_reboot_count": [
+            ]
+        }
+
+def process_rfc_2544(json_data: str, is_row: bool):
+    max_delay = json_data.get("max_delay", 0)
+    defined_delay_threshold = 30.0
+    minus_max_delay = 0.0
+    if max_delay > defined_delay_threshold:
+        minus_max_delay = max_delay - defined_delay_threshold
+
+    if is_row:
+        return minus_max_delay
+    else:
+        return {
+            "rfc-2544": [
                 {
-                    "name": "Data Points",
-                    "x": ["-inf", "reboot_count", "inf"],
-                    "y": [0, reboot_count, 0],
+                    "x": ["-inf", "max_delay", "inf"],
+                    "y": [0, max_delay, 0],
                     "mode": "markers",
                     "marker": {
                         "size": 10,
                     },
+                    "name": "Data Points",
                     "error_y": {
                         "type": "data",
                         "symmetric": "false",
                         "array": [0, 0, 0],
-                        "arrayminus": [0, minus_total_reboot_count, 0]
+                        "arrayminus": [0, minus_max_delay, 0]
                     },
                     "type": "scatter",
                 },
                 {
+                    "x": ["-inf", "max_delay", "inf"],
+                    "y": [defined_delay_threshold, defined_delay_threshold, defined_delay_threshold],
                     "name": "Threshold",
-                    "x": ["-inf", "reboot_count", "inf"],
-                    "y": [defined_total_reboot_count, defined_total_reboot_count, defined_total_reboot_count],
                     "mode": "lines",
                     "marker": {
                         "size": 15,
@@ -302,13 +223,104 @@ def process_deployment(json_data: str):
                         "dash": "dot",
                         "width": 3,
                     },
-                    "type": "scatter",
+                    "type": "scatter"
                 }
             ]
         }
-    }
+
+def process_oslat(json_data: str, is_row: bool):
+    result = get_oslat_or_cyclictest(json_data, is_row)
+    return result if is_row else { "oslat": result }
+
+def process_cyclictest(json_data: str, is_row: bool):
+    result = get_oslat_or_cyclictest(json_data, is_row)
+    return result if is_row else { "cyclictest": result }
 
-def get_oslat_or_cyclictest(json_data: str):
+def process_deployment(json_data: str, is_row: bool):
+    total_minutes = json_data.get("total_minutes", 0)
+    reboot_count = json_data.get("reboot_count", 0)
+    defined_total_minutes_threshold = 180
+    defined_total_reboot_count = 3
+    minus_total_minutes = 0.0
+    minus_total_reboot_count = 0.0
+    if total_minutes > defined_total_minutes_threshold:
+        minus_total_minutes = total_minutes - defined_total_minutes_threshold
+    if reboot_count > defined_total_reboot_count:
+        minus_total_reboot_count = reboot_count - defined_total_reboot_count
+
+    if is_row:
+        return 1 if (minus_total_minutes != 0 or minus_total_reboot_count != 0) else 0
+    else:
+        return {
+            "deployment": {
+                "total_minutes": [
+                    {
+                        "name": "Data Points",
+                        "x": ["-inf", "total_minutes", "inf"],
+                        "y": [0, total_minutes, 0],
+                        "mode": "markers",
+                        "marker": {
+                            "size": 10,
+                        },
+                        "error_y": {
+                            "type": "data",
+                            "symmetric": "false",
+                            "array": [0, 0, 0],
+                            "arrayminus": [0, minus_total_minutes, 0]
+                        },
+                        "type": "scatter",
+                    },
+                    {
+                        "name": "Threshold",
+                        "x": ["-inf", "total_minutes", "inf"],
+                        "y": [defined_total_minutes_threshold, defined_total_minutes_threshold, defined_total_minutes_threshold],
+                        "mode": "lines",
+                        "marker": {
+                            "size": 15,
+                        },
+                        "line": {
+                            "dash": "dot",
+                            "width": 3,
+                        },
+                        "type": "scatter",
+                    }
+                ],
+                "total_reboot_count": [
+                    {
+                        "name": "Data Points",
+                        "x": ["-inf", "reboot_count", "inf"],
+                        "y": [0, reboot_count, 0],
+                        "mode": "markers",
+                        "marker": {
+                            "size": 10,
+                        },
+                        "error_y": {
+                            "type": "data",
+                            "symmetric": "false",
+                            "array": [0, 0, 0],
+                            "arrayminus": [0, minus_total_reboot_count, 0]
+                        },
+                        "type": "scatter",
+                    },
+                    {
+                        "name": "Threshold",
+                        "x": ["-inf", "reboot_count", "inf"],
+                        "y": [defined_total_reboot_count, defined_total_reboot_count, defined_total_reboot_count],
+                        "mode": "lines",
+                        "marker": {
+                            "size": 15,
+                        },
+                        "line": {
+                            "dash": "dot",
+                            "width": 3,
+                        },
+                        "type": "scatter",
+                    }
+                ]
+            }
+        }
+
+def get_oslat_or_cyclictest(json_data: str, is_row: bool):
     min_number_of_nines = 10000
     max_latency = 0
     minus_max_latency = 0
@@ -319,70 +331,73 @@ def get_oslat_or_cyclictest(json_data: str):
     defined_latency_threshold = 20
     defined_number_of_nines_threshold = 100
     for each_test_unit in json_data["test_units"]:
         max_latency = max(max_latency, each_test_unit.get("max_latency", 0))
         min_number_of_nines = min(min_number_of_nines, each_test_unit.get("number_of_nines", 0))
     if max_latency > defined_latency_threshold:
         minus_max_latency = max_latency - defined_latency_threshold
-
-    return {
-        "number_of_nines": [
-            {
-                "name": "Data Points",
-                "x": ["-inf", "min_number_of_nines", "inf"],
-                "y": [0, min_number_of_nines, 0],
-                "mode": "markers",
-                "marker": {
-                    "size": 10,
-                },
-                "error_y": {
-                    "type": "data",
-                    "symmetric": "false",
-                    "array": [0, 0, 0],
-                    "arrayminus": [0, min_number_of_nines - defined_number_of_nines_threshold, 0]
-                },
-                "type": "scatter",
-            },
-            {
-                "name": "Threshold",
-                "x": ["-inf", "min_number_of_nines", "inf"],
-                "y": [defined_number_of_nines_threshold, defined_number_of_nines_threshold, defined_number_of_nines_threshold],
-                "mode": "lines",
-                "marker": {
-                    "size": 15,
-                },
-                "line": {
-                    "dash": "dot",
-                    "width": 3,
-                },
-                "type": "scatter",
-            }
-        ],
-        "max_latency": [
-            {
-                "name": "Data Points",
-                "x": ["-inf", "max_latency", "inf"],
-                "y": [0, max_latency, 0],
-                "mode": "markers",
-                "marker": {
-                    "size": 10,
-                },
-                "error_y": {
-                    "type": "data",
-                    "symmetric": "false",
-                    "array": [0, 0, 0],
-                    "arrayminus": [0, minus_max_latency, 0]
-                },
-                "type": "scatter",
-            },
-            {
-                "name": "Threshold",
-                "x": ["-inf", "max_latency", "inf"],
-                "y": [defined_latency_threshold, defined_latency_threshold, defined_latency_threshold],
-                "mode": "lines",
-                "marker": {
-                    "size": 15,
-                },
-                "line": {
-                    "dash": "dot",
-                    "width": 3,
-                },
-                "type": "scatter",
-            }
-        ]
-    }
+
+    if is_row:
+        return 1 if ((min_number_of_nines - defined_number_of_nines_threshold) != 0 or minus_max_latency != 0) else 0
+    else:
+        return {
+            "number_of_nines": [
+                {
+                    "name": "Data Points",
+                    "x": ["-inf", "min_number_of_nines", "inf"],
+                    "y": [0, min_number_of_nines, 0],
+                    "mode": "markers",
+                    "marker": {
+                        "size": 10,
+                    },
+                    "error_y": {
+                        "type": "data",
+                        "symmetric": "false",
+                        "array": [0, 0, 0],
+                        "arrayminus": [0, min_number_of_nines - defined_number_of_nines_threshold, 0]
+                    },
+                    "type": "scatter",
+                },
+                {
+                    "name": "Threshold",
+                    "x": ["-inf", "min_number_of_nines", "inf"],
+                    "y": [defined_number_of_nines_threshold, defined_number_of_nines_threshold, defined_number_of_nines_threshold],
+                    "mode": "lines",
+                    "marker": {
+                        "size": 15,
+                    },
+                    "line": {
+                        "dash": "dot",
+                        "width": 3,
+                    },
+                    "type": "scatter",
+                }
+            ],
+            "max_latency": [
+                {
+                    "name": "Data Points",
+                    "x": ["-inf", "max_latency", "inf"],
+                    "y": [0, max_latency, 0],
+                    "mode": "markers",
+                    "marker": {
+                        "size": 10,
+                    },
+                    "error_y": {
+                        "type": "data",
+                        "symmetric": "false",
+                        "array": [0, 0, 0],
+                        "arrayminus": [0, minus_max_latency, 0]
+                    },
+                    "type": "scatter",
+                },
+                {
+                    "name": "Threshold",
+                    "x": ["-inf", "max_latency", "inf"],
+                    "y": [defined_latency_threshold, defined_latency_threshold, defined_latency_threshold],
+                    "mode": "lines",
+                    "marker": {
+                        "size": 15,
+                    },
+                    "line": {
+                        "dash": "dot",
+                        "width": 3,
+                    },
+                    "type": "scatter",
+                }
+            ]
+        }
diff --git a/backend/app/api/v1/endpoints/telco/telcoJobs.py b/backend/app/api/v1/endpoints/telco/telcoJobs.py
index a6c688a6..fc2044de 100644
--- a/backend/app/api/v1/endpoints/telco/telcoJobs.py
+++ b/backend/app/api/v1/endpoints/telco/telcoJobs.py
@@ -25,7 +25,7 @@ async def jobs(start_date: date = Query(None, description="Start date for search
                pretty: bool = Query(False, description="Output content in pretty format.")):
     if start_date is None:
         start_date = datetime.utcnow().date()
-        start_date = start_date - timedelta(days=7)
+        start_date = start_date - timedelta(days=5)
 
     if end_date is None:
         end_date = datetime.utcnow().date()