evaluation.py
"""
@author Jack Ringer
Date: 4/26/2023
Description:
Contains code to evaluate .csv files generated by parameter_sweep.py
"""
import os

import pandas as pd


def get_accuracy(df: pd.DataFrame) -> float:
    """
    Get the accuracy of a given dataframe (# correct consensus / total runs).
    :param df: pd.DataFrame, results generated by parameter_sweep.py
    :return: float, accuracy
    """
    # A run counts as correct when the reached consensus matches the start value
    num_correct = len(df[df["start"] == df["consensus"]])
    total_runs = len(df)
    # Guard against an empty frame (e.g., a weights value with no rows)
    return num_correct / total_runs if total_runs else 0.0
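
# Minimal sketch of get_accuracy on a toy frame (hypothetical data, shown for
# illustration only):
#   >>> toy = pd.DataFrame({"start": [1, 0, 1], "consensus": [1, 1, 1]})
#   >>> get_accuracy(toy)  # rows 0 and 2 match -> 2/3
#   0.6666666666666666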


def print_accuracies(df: pd.DataFrame) -> None:
    """
    Print the accuracy and mean iteration count for each weights setting.
    :param df: pd.DataFrame, results generated by parameter_sweep.py
    """
    weight_vals = ["0.43 0.57", "0.46 0.54", "0.49 0.51"]
    for weights in weight_vals:
        sub_df = df.loc[df["weights"] == weights]
        accuracy = get_accuracy(sub_df)
        avg_iterations = sub_df["iterations"].mean()
        print(f"Accuracy for weights {weights}: {accuracy:.4f}")
        print(f"Avg # of iterations: {avg_iterations:.2f}\n")


def main():
    # Evaluate every results file produced by parameter_sweep.py
    for csv_file in os.listdir("results"):
        csv_path = os.path.join("results", csv_file)
        df = pd.read_csv(csv_path)
        print(csv_file)
        print_accuracies(df)
        print("-" * 60)


if __name__ == "__main__":
    main()
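
# Illustrative run on hypothetical data (not part of the original script; the
# real input is whatever parameter_sweep.py writes under results/):
#   toy = pd.DataFrame({
#       "start":      [1, 1, 0, 1, 0, 0],
#       "consensus":  [1, 0, 0, 1, 0, 1],
#       "weights":    ["0.43 0.57"] * 2 + ["0.46 0.54"] * 2 + ["0.49 0.51"] * 2,
#       "iterations": [10, 12, 8, 9, 11, 7],
#   })
#   print_accuracies(toy)
#   # -> Accuracy for weights 0.43 0.57: 0.5000, and so on per weights group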