
Commit 22a8614

push
sousinha1997 committed Aug 27, 2024
1 parent 0e7097d commit 22a8614
Showing 22 changed files with 584 additions and 373 deletions.
12 changes: 10 additions & 2 deletions quisby/benchmarks/auto_hpl/extract.py
@@ -3,13 +3,21 @@
from quisby.pricing import cloud_pricing
from quisby.benchmarks.linpack.extract import linpack_format_data

from quisby.util import read_config


def extract_auto_hpl_data(path, system_name):

summary_data = []
server = read_config("server", "name")
result_dir = read_config("server", "result_dir")

if path.endswith(".csv"):
with open(path) as file:
results = []
file_data = file.readlines()
sum_path = path.split("/./")[1]
summary_data.append([system_name, "http://" + server + "/results/" + result_dir + "/" + sum_path])

if len(file_data) > 1:
header_row = file_data[-2].strip().split(":")
@@ -24,8 +32,8 @@ def extract_auto_hpl_data(path, system_name):
)

if results:
return results
return results, summary_data

else:
return None
return None, None
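
Note: extract_auto_hpl_data() now returns a (results, summary_data) pair instead of a bare results list, so existing callers have to unpack two values. A minimal sketch of how the new summary link is put together, using made-up values in place of the read_config("server", ...) lookups:

    # Hypothetical stand-ins for read_config("server", "name") / ("server", "result_dir")
    server = "perf-reports.example.com"
    result_dir = "2024-08-27"

    # Hypothetical result path; the code expects a "/./" separator in it
    path = "/archive/./auto_hpl/m5_xlarge.csv"
    sum_path = path.split("/./")[1]          # -> "auto_hpl/m5_xlarge.csv"

    summary_row = ["m5.xlarge",
                   "http://" + server + "/results/" + result_dir + "/" + sum_path]
    print(summary_row[1])
    # http://perf-reports.example.com/results/2024-08-27/auto_hpl/m5_xlarge.csv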

20 changes: 11 additions & 9 deletions quisby/benchmarks/coremark/compare.py
@@ -17,8 +17,9 @@ def extract_prefix_and_number(input_string):
match = re.search(r'^(.*?)(\d+)(.*?)$', input_string)
if match:
prefix = match.group(1)
return prefix
return None
suffix = match.group(3) # Extracts the suffix after the number
return prefix, suffix
return None, None
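
Note: extract_prefix_and_number() in compare.py now returns a (prefix, suffix) pair rather than just the prefix, so compare_inst() treats the trailing version/feature suffix as part of the instance identity. A standalone repro of the helper with illustrative Azure-style names:

    import re

    def extract_prefix_and_number(input_string):
        # Same regex as above: lazy prefix, digits, then the trailing suffix
        match = re.search(r'^(.*?)(\d+)(.*?)$', input_string)
        if match:
            return match.group(1), match.group(3)
        return None, None

    print(extract_prefix_and_number("Standard_D2s_v3"))   # ('Standard_D', 's_v3')
    print(extract_prefix_and_number("Standard_D2as_v4"))  # ('Standard_D', 'as_v4')

    # compare_inst() compares these tuples, so sizes within one family/version
    # still match, while different suffixes no longer do:
    print(extract_prefix_and_number("Standard_D2s_v3") ==
          extract_prefix_and_number("Standard_D4s_v3"))   # True
    print(extract_prefix_and_number("Standard_D2s_v3") ==
          extract_prefix_and_number("Standard_D2as_v4"))  # False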


def compare_inst(item1, item2):
@@ -33,7 +34,7 @@ def compare_inst(item1, item2):
return extract_prefix_and_number(item1) == extract_prefix_and_number(item2)


def compare_coremark_results(spreadsheets, spreadsheetId, test_name, table_name=["System name","Price/perf"]):
def compare_coremark_results(spreadsheets, spreadsheetId, test_name, table_name=["System name","Price-perf"]):
values = []
results = []
spreadsheet_name = []
@@ -51,12 +52,13 @@ def compare_coremark_results(spreadsheets, spreadsheetId, test_name, table_name=
for ele in list_2:
# Check max throughput
if value[0][0] in table_name and ele[0][0] in table_name and value[0][0] == ele[0][0]:
results.append([""])
for item1 in value:
for item2 in ele:
if item1[0] == item2[0]:
results = merge_lists_alternately(results, item1, item2)
break
if compare_inst(value[1][0], ele[1][0]):
results.append([""])
for item1 in value:
for item2 in ele:
if item1[0] == item2[0]:
results = merge_lists_alternately(results, item1, item2)
break

elif value[0][0] == "Cost/Hr" and ele[0][0] == "Cost/Hr":
if compare_inst(value[1][0], ele[1][0]):
113 changes: 76 additions & 37 deletions quisby/benchmarks/coremark/coremark.py
@@ -1,11 +1,17 @@

""" Custom key to sort the data base don instance name """
from itertools import groupby

from quisby import custom_logger
import re

from quisby.util import read_config
from quisby.pricing.cloud_pricing import get_cloud_pricing

from quisby.util import process_instance

from quisby.util import mk_int


def extract_prefix_and_number(input_string):
match = re.search(r'^(.*?)(\d+)(.*?)$', input_string)
@@ -18,27 +24,29 @@ def extract_prefix_and_number(input_string):


def custom_key(item):
cloud_type = read_config("cloud","cloud_type")
cloud_type = read_config("cloud", "cloud_type")
if item[1][0] == "localhost":
return item[1][0]
elif cloud_type == "aws":
instance_type =item[1][0].split(".")[0]
instance_number = item[1][0].split(".")[1]
instance_name = item[1][0]
instance_type = instance_name.split(".")[0]
instance_number = instance_name.split(".")[1]
return instance_type, instance_number
elif cloud_type == "gcp":
instance_type = item[1][0].split("-")[0]
instance_number = int(item[1][0].split('-')[-1])
return instance_type, instance_number
elif cloud_type == "azure":
instance_type, instance_number, version=extract_prefix_and_number(item[1][0])
return instance_type, instance_number
instance_type, instance_number, version = extract_prefix_and_number(item[1][0])
return instance_type, version, instance_number
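
Note: for Azure the sort key changes from (family, size) to (family, version, size), so instances are ordered version-by-version before size. The real custom_key() reads the cloud type from config and takes a full result row; the sketch below mirrors only the Azure branch, and the int() conversion of the size digits is an assumption:

    import re

    def azure_key(name):
        # family, then version suffix, then numeric size (int conversion assumed)
        m = re.search(r'^(.*?)(\d+)(.*?)$', name)
        family, size, version = m.group(1), int(m.group(2)), m.group(3)
        return family, version, size

    names = ["Standard_D4s_v3", "Standard_D2s_v4", "Standard_D2s_v3", "Standard_D8s_v4"]
    print(sorted(names, key=azure_key))
    # ['Standard_D2s_v3', 'Standard_D4s_v3', 'Standard_D2s_v4', 'Standard_D8s_v4']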


def calc_price_performance(inst, avg):
region = read_config("cloud", "region")
cloud_type = read_config("cloud", "cloud_type")
os_type = read_config("test", "os_type")
cost_per_hour = None
price_perf = 0.0
try:
cost_per_hour = get_cloud_pricing(
inst, region, cloud_type.lower(), os_type)
@@ -49,53 +57,84 @@ def extract_prefix_and_number(input_string):
return cost_per_hour, price_perf
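
Note: calc_price_performance() now pre-initialises cost_per_hour and price_perf, which appears intended to let a failed pricing lookup still return (None, 0.0) instead of hitting an unbound local. The price-performance arithmetic itself sits in the elided lines; assuming it divides the average test passes by the hourly cost (which is what the "Passes/$" label suggests):

    # Hypothetical numbers; real values come from get_cloud_pricing() and the
    # CoreMark average computed in create_summary_coremark_data().
    avg_passes = 1_500_000
    cost_per_hour = 0.50                       # USD/hr

    price_perf = avg_passes / cost_per_hour    # assumed formula for "Passes/$"
    print(price_perf)                          # 3000000.0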


def create_summary_coremark_data(results, OS_RELEASE):
def group_data(results):
cloud_type = read_config("cloud", "cloud_type")
if cloud_type == "aws":
return groupby(results, key=lambda x: process_instance(x[1][0], "family", "version", "feature", "machine_type"))
elif cloud_type == "azure":
results = sorted(results, key=lambda x: process_instance(x[1][0], "family", "feature"))
return groupby(results, key=lambda x: process_instance(x[1][0], "family", "feature"))
elif cloud_type == "gcp":
return groupby(results, key=lambda x: process_instance(x[1][0], "family", "version","sub_family","feature"))


def sort_data(results):
cloud_type = read_config("cloud", "cloud_type")
if cloud_type == "aws":
results.sort(key=lambda x: str(process_instance(x[1][0], "family")))
elif cloud_type == "azure":
results.sort(key=lambda x: str(process_instance(x[1][0], "family", "version", "feature")))
elif cloud_type == "gcp":
results.sort(key=lambda x: str(process_instance(x[1][0], "family", "version", "sub_family")))
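
Note: group_data() is built on itertools.groupby, which only merges consecutive items with equal keys; that is why sort_data() (and the explicit sorted() call in the Azure branch) has to run before grouping. A quick standalone illustration:

    from itertools import groupby

    families = ["m5", "c5", "m5", "c5"]
    print([(k, len(list(g))) for k, g in groupby(sorted(families))])
    # [('c5', 2), ('m5', 2)]
    print([(k, len(list(g))) for k, g in groupby(families)])
    # [('m5', 1), ('c5', 1), ('m5', 1), ('c5', 1)]  -- unsorted input splits the groups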

def create_summary_coremark_data(results, OS_RELEASE, sorted_results=None):
final_results = []
cal_data = [["System name", "Test_passes-"+OS_RELEASE]]

# Sort data based on instance name
sorted_data = sorted(results, key=custom_key)
cost_per_hour, price_per_perf = [], []

# Add summary data
for item in sorted_data:
sum = 0
avg = 0
iterations = 0
for index in range(3, len(item)):
sum = sum + float(item[index][1])
iterations = iterations + 1
avg = float(sum/iterations)
try:
cph, pp = calc_price_performance(item[1][0], avg)
except Exception as exc:
custom_logger.error(str(exc))
break
cal_data.append([item[1][0], avg])
price_per_perf.append([item[1][0], pp])
cost_per_hour.append([item[1][0], cph])

final_results += [[""]]
final_results += cal_data
final_results.append([""])
final_results.append(["Cost/Hr"])
final_results += cost_per_hour
final_results.append([""])
final_results.append(["Price/perf", f"Passes/$-{OS_RELEASE}"])
final_results += price_per_perf
results = list(filter(None, results))
sort_data(results)

for _, items in group_data(results):
cal_data = [["System name", "Test_passes-" + OS_RELEASE]]
items = list(items)
sorted_data = sorted(items, key=lambda x: mk_int(process_instance(x[1][0], "size")))
# sorted_results.extend(sorted_data)
cost_per_hour, price_per_perf = [], []

# Add summary data
for item in sorted_data:
sum = 0
avg = 0
iterations = 0
for index in range(3, len(item)):
sum = sum + float(item[index][1])
iterations = iterations + 1
avg = float(sum/iterations)
try:
cph, pp = calc_price_performance(item[1][0], avg)
except Exception as exc:
custom_logger.error(str(exc))
break
cal_data.append([item[1][0], avg])
price_per_perf.append([item[1][0], pp])
cost_per_hour.append([item[1][0], cph])
sorted_results=[[""]]
sorted_results += cal_data
sorted_results.append([""])
sorted_results.append(["Cost/Hr"])
sorted_results += cost_per_hour
sorted_results.append([""])
sorted_results.append(["Price-perf", f"Passes/$-{OS_RELEASE}"])
sorted_results += price_per_perf
final_results.extend(sorted_results)
return final_results
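
Note: the summary block is now emitted once per instance group rather than once for the whole sheet, and the price-performance header is spelled "Price-perf" (compare.py and graph.py are updated to match). Roughly, each group contributes a block shaped like the sketch below; the numbers are made up, and the Passes/$ values assume the average-divided-by-cost formula:

    OS_RELEASE = "RHEL-9.4"
    group_block = [
        [""],
        ["System name", "Test_passes-" + OS_RELEASE],
        ["m5.xlarge", 1500000.0],
        ["m5.2xlarge", 2900000.0],
        [""],
        ["Cost/Hr"],
        ["m5.xlarge", 0.192],
        ["m5.2xlarge", 0.384],
        [""],
        ["Price-perf", "Passes/$-" + OS_RELEASE],
        ["m5.xlarge", 7812500.0],
        ["m5.2xlarge", 7552083.33],
    ]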


def extract_coremark_data(path, system_name, OS_RELEASE):
""""""
results = []
processed_data =[]
summary_data = []
server = read_config("server", "name")
result_dir = read_config("server", "result_dir")

# Extract data from file
try:
if path.endswith(".csv"):
with open(path) as file:
coremark_results = file.readlines()
sum_path = path.split("/./")[1]
summary_data.append([system_name, "http://" + server + "/results/" + result_dir + "/" + sum_path])
else:
return None
except Exception as exc:
@@ -118,7 +157,7 @@ def extract_coremark_data(path, system_name, OS_RELEASE):
iteration = iteration + 1
results.append(processed_data)

return results
return results, summary_data



4 changes: 2 additions & 2 deletions quisby/benchmarks/coremark/graph.py
@@ -108,7 +108,7 @@ def graph_coremark_data(spreadsheetId, range, action):
header_row.extend(row)
title = "%s : %s" % (range, "Test Passes")
subtitle = "Average Test Passes"
elif "Price/perf" in row:
elif "Price-perf" in row:
start_index = index
header_row.extend(row)
title = "%s : %s" % (range, "Price-Performance")
@@ -133,7 +133,7 @@ def graph_coremark_data(spreadsheetId, range, action):
"chart": {
"spec": {
"title": title,
"subtitle": subtitle,
"subtitle": subtitle + " : "+graph_data[1][0],
"basicChart": {
"chartType": "COMBO",
"legendPosition": "RIGHT_LEGEND",
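
Note: the chart subtitle now has graph_data[1][0] appended, which (given the summary layout sketched earlier) should be the first "System name" entry in the block being charted. Illustrative values:

    graph_data = [
        ["System name", "Test_passes-RHEL-9.4"],
        ["m5.xlarge", 1500000.0],
    ]
    subtitle = "Average Test Passes"
    print(subtitle + " : " + graph_data[1][0])
    # Average Test Passes : m5.xlarge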
