Validations can now contain multiple metrics (see #20)
jm-cc committed Sep 23, 2020
1 parent 816a628 commit 35a56ad
Showing 4 changed files with 135 additions and 29 deletions.
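
With this change, a validation no longer carries a single top-level tolerance; it holds a list of metrics under a Metrics key, each with its own id, type ("absolute" or "relative"), tolerance, and, for relative metrics, a reference. A sketch of such an entry with invented field values (the exact schema comes from the gcvb test descriptions):

# Hypothetical validation entry under the new multi-metric schema.
validation = {
    "type": "file_comparison",
    "executable": "compare.py",
    "launch_command": "python",
    "Metrics": [
        {"id": "l2_error", "type": "absolute", "tolerance": 1e-8},
        {"id": "residual", "type": "relative", "tolerance": 1e-3,
         "reference": 0.42},  # reference may also be a per-configuration dict
    ],
}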
36 changes: 23 additions & 13 deletions gcvb/dashboard/apps/test.py
@@ -42,19 +42,29 @@ def data_preparation(run, test_id):
             job.fill_at_job_creation_validation(ajc, validation,
                                                 loader.data_root, test["data"],
                                                 loader.config, loader.references)
-            v = {}
-            d["metrics"].append(v)
-            v["id"] = validation["id"]
-            v["type"] = validation.get("type", "file_comparison")
-            v["tolerance"] = validation["tolerance"]
-            v["distance"] = run_summary["metrics"].get(v["id"], "N/A")  # Todo, it's ok only for file comparison...
-            if v["distance"] == "N/A":
-                data["status"] = "failure"
-            elif float(v["distance"]) > float(v["tolerance"]):
-                data["status"] = "failure"
-            v["from_results"] = [{"id": f["id"],
-                                  "file": job.format_launch_command(f["file"], loader.config, ajc)}
-                                 for f in validation.get("serve_from_results", [])]
+            for metric in validation.get("Metrics", []):
+                v = {}
+                d["metrics"].append(v)
+                v["id"] = metric["id"]
+                v["type"] = metric.get("type", "absolute" if validation["type"] == "file_comparison" else "relative")
+                v["tolerance"] = metric["tolerance"]
+                if v["type"] == "absolute":
+                    v["distance"] = run_summary["metrics"].get(v["id"], "N/A")
+                else:
+                    if v["id"] in run_summary["metrics"]:
+                        if isinstance(metric["reference"], dict):
+                            v["distance"] = "N/A"  # no support for configuration-dependent metrics yet
+                        else:
+                            v["distance"] = abs(float(run_summary["metrics"][v["id"]]) - float(metric["reference"])) / float(metric["reference"])
+                    else:
+                        v["distance"] = "N/A"
+                if v["distance"] == "N/A":
+                    data["status"] = "failure"
+                elif float(v["distance"]) > float(v["tolerance"]):
+                    data["status"] = "failure"
+                v["from_results"] = [{"id": f["id"],
+                                      "file": job.format_launch_command(f["file"], loader.config, ajc)}
+                                     for f in validation.get("serve_from_results", [])]
     return data
 
 #Content
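For reference, each entry appended to d["metrics"] by the new loop has roughly this shape (a sketch with invented values, not output from the commit):

# Hypothetical shape of one dashboard metric entry built in data_preparation:
v = {"id": "residual", "type": "relative", "tolerance": 1e-3,
     "distance": 0.0007, "from_results": []}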
2 changes: 1 addition & 1 deletion gcvb/job.py
@@ -64,7 +64,7 @@ def fill_at_job_creation_task(at_job_creation, task, full_id, config):
     return None
 
 def fill_at_job_creation_validation(at_job_creation, validation, data_root, ref_data, config, valid):
-    at_job_creation["va_id"] = validation["id"]
+    at_job_creation["va_id"] = validation["Metrics"][0]["id"]
     at_job_creation["va_executable"] = validation["executable"]
     if validation["type"] == "file_comparison":
         #specific values for file comparison
92 changes: 92 additions & 0 deletions gcvb/model.py
@@ -0,0 +1,92 @@
from enum import IntEnum

class JobStatus(IntEnum):
    unlinked = -4
    pending = -3
    ready = -2
    running = -1

class AbsoluteMetric:
    def __init__(self, reference, tolerance, unit=None):
        self.type = "absolute"
        self.reference = reference
        self.tolerance = tolerance
        self.unit = unit
    def within_tolerance(self, value):
        return abs(value - self.reference) <= self.tolerance

class RelativeMetric:
    def __init__(self, reference, tolerance):
        self.type = "relative"
        self.reference = reference
        self.tolerance = tolerance
    def within_tolerance(self, value):
        return (abs(value - self.reference) / self.reference) <= self.tolerance
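A quick illustration of how the two metric types differ (a sketch, not part of the commit):

# Sketch: exercising the two metric types.
m_abs = AbsoluteMetric(reference=100.0, tolerance=0.5)
m_rel = RelativeMetric(reference=100.0, tolerance=0.01)
print(m_abs.within_tolerance(100.4))  # True: |100.4 - 100.0| = 0.4 <= 0.5
print(m_rel.within_tolerance(102.0))  # False: |102.0 - 100.0| / 100.0 = 0.02 > 0.01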

class Validation:
    default_type = "relative"
    default_reference = None
    def __init__(self, valid_dict, config):
        self.raw_dict = valid_dict
        self.status = JobStatus.unlinked
        self.executable = valid_dict["executable"]
        self.type = valid_dict["type"]
        self.launch_command = valid_dict["launch_command"]
        self.recorded_metrics = {}
        self.init_metrics(config)

    def init_metrics(self, config):
        self.expected_metrics = {}
        for metric in self.raw_dict.get("Metrics", []):
            t = metric.get("type", self.default_type)
            if t not in ["relative", "absolute"]:
                raise ValueError("'type' must be 'relative' or 'absolute'.")
            # reference is either a dict (per configuration) or a number.
            ref = metric.get("reference", self.default_reference)
            if ref is None:
                raise ValueError("'reference' must be provided.")
            if isinstance(ref, dict):
                if config in ref:
                    if t == "relative":
                        self.expected_metrics[metric["id"]] = RelativeMetric(ref[config], metric["tolerance"])
                    else:
                        self.expected_metrics[metric["id"]] = AbsoluteMetric(ref[config], metric["tolerance"])
            else:
                if t == "relative":
                    self.expected_metrics[metric["id"]] = RelativeMetric(ref, metric["tolerance"])
                else:
                    self.expected_metrics[metric["id"]] = AbsoluteMetric(ref, metric["tolerance"])

class FileComparisonValidation(Validation):
    default_type = "absolute"
    default_reference = 0

class Task:
    def __init__(self, task_dict, config):
        self.raw_dict = task_dict
        self.status = JobStatus.unlinked
        self.executable = task_dict["executable"]
        self.options = task_dict.get("options", '')
        self.launch_command = task_dict["launch_command"]
        self.nprocs = task_dict["nprocs"]
        self.nthreads = task_dict["nthreads"]
        # Validations
        self.Validations = []
        for v in task_dict.get("Validations", []):
            if v["type"] == "script":
                self.Validations.append(Validation(v, config))
            else:
                self.Validations.append(FileComparisonValidation(v, config))

class Test:
    def __init__(self, test_dict, config):
        self.raw_dict = test_dict
        # Tasks
        self.Tasks = []
        for t in test_dict.get("Tasks", []):
            self.Tasks.append(Task(t, config))
        # Steps: each task followed by its validations
        self.Steps = []
        for t in self.Tasks:
            self.Steps.append(t)
            for v in t.Validations:
                self.Steps.append(v)
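Putting the new model together: a Test built from a test-description dict expands into Steps, each task followed by its validations. A minimal construction sketch with invented field values (not part of the commit):

# Sketch: building a Test from a hypothetical test description.
test = Test({
    "Tasks": [{
        "executable": "solver",
        "launch_command": "mpirun",
        "nprocs": 4,
        "nthreads": 1,
        "Validations": [{
            "type": "script",
            "executable": "check.py",
            "launch_command": "python",
            "Metrics": [{"id": "l2_error", "type": "absolute",
                         "tolerance": 1e-8, "reference": 0.0}],
        }],
    }],
}, config="default")
# test.Steps == [task, validation]: the task, then its validation.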
34 changes: 19 additions & 15 deletions gcvb/validation.py
@@ -11,11 +11,13 @@ def __init__(self, validation_base, run_base, configuration=None):
             self.validation_base[t["id"]] = {}
             for ta in t["Tasks"]:
                 for v in ta.get("Validations", []):
-                    d = {}
-                    self.validation_base[t["id"]][v["id"]] = d
-                    for k in ["id", "tolerance", "type", "reference"]:
-                        if k in v:
-                            d[k] = v[k]
+                    for m in v.get("Metrics", []):
+                        d = {}
+                        self.validation_base[t["id"]][m["id"]] = d
+                        for k in ["id", "tolerance", "type", "reference"]:
+                            if k in m:
+                                d[k] = m[k]
+                        d["launch_type"] = v["type"]
         self.success = {}
         self.failure = {}
         self.missing_validations = {}
@@ -27,21 +29,23 @@ def __init__(self, validation_base, run_base, configuration=None):
                 if self.status[test_id] != "failure":
                     self.status[test_id] = "missing_validation"
                 continue
-            validation_type = valid.setdefault("type", "file_comparison")
-            if validation_type == "file_comparison":
+            validation_type = valid.setdefault("type", "absolute" if valid["launch_type"] == "file_comparison" else "relative")
+            if validation_type == "absolute":
                 t = float(test[validation_metric])
                 self.__within_tolerance(t, test_id, valid)
-            elif validation_type == "configuration_independent":
-                rel_change = relative_change(float(test[validation_metric]), float(valid["reference"]))
+            elif validation_type == "relative":
+                if isinstance(valid["reference"], dict):
+                    if configuration in valid["reference"]:
+                        ref = float(valid["reference"][configuration])
+                    else:
+                        continue
+                else:
+                    ref = float(valid["reference"])
+                rel_change = relative_change(float(test[validation_metric]), ref)
                 t = abs(rel_change)
                 self.__within_tolerance(t, test_id, valid)
-            elif validation_type == "configuration_dependent":
-                if configuration in valid["reference"]:
-                    rel_change = relative_change(float(test[validation_metric]), float(valid["reference"][configuration]))
-                    t = abs(rel_change)
-                    self.__within_tolerance(t, test_id, valid)
             else:
-                raise ValueError("Unknown validation type \"%s\". Should be in (file_comparison, configuration_independent, configuration_dependent)" % validation_type)
+                raise ValueError("Unknown validation type \"%s\". Should be in (absolute, relative)" % validation_type)
 
     def __within_tolerance(self, test_value, test_id, valid):
         res = {"id": valid["id"], "tolerance": valid["tolerance"], "distance": test_value, "type": valid["type"]}
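The helper relative_change is defined elsewhere in gcvb/validation.py and is not shown in this diff; from its usage above it presumably has the usual form, something like:

# Presumed shape of the relative_change helper used above (an assumption;
# the actual definition is not part of this diff):
def relative_change(value, reference):
    return (value - reference) / reference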
