diff --git a/gcvb/dashboard/apps/test.py b/gcvb/dashboard/apps/test.py
index fab4ab1..fb8d81c 100644
--- a/gcvb/dashboard/apps/test.py
+++ b/gcvb/dashboard/apps/test.py
@@ -42,19 +42,29 @@ def data_preparation(run, test_id):
             job.fill_at_job_creation_validation(ajc, validation, loader.data_root, test["data"], loader.config, loader.references)
-            v = {}
-            d["metrics"].append(v)
-            v["id"]=validation["id"]
-            v["type"]=validation.get("type","file_comparison")
-            v["tolerance"]=validation["tolerance"]
-            v["distance"]=run_summary["metrics"].get(v["id"],"N/A") #Todo, it's ok only for file comparison...
-            if v["distance"]=="N/A":
-                data["status"]="failure"
-            elif float(v["distance"])>float(v["tolerance"]):
-                data["status"]="failure"
-            v["from_results"] = [{"id" : f["id"],
-                                  "file" : job.format_launch_command(f["file"], loader.config, ajc)}
-                                 for f in validation.get("serve_from_results",[])]
+            for metric in validation.get("Metrics",[]):
+                v = {}
+                d["metrics"].append(v)
+                v["id"]=metric["id"]
+                v["type"]=metric.get("type","absolute" if validation["type"]=="file_comparison" else "relative")
+                v["tolerance"]=metric["tolerance"]
+                if v["type"] == "absolute":
+                    v["distance"]=run_summary["metrics"].get(v["id"],"N/A")
+                else:
+                    if v["id"] in run_summary["metrics"]:
+                        if isinstance(metric["reference"], dict):
+                            v["distance"] = "N/A" # no support for configuration dependent metric yet
+                        else:
+                            v["distance"] = abs(float(run_summary["metrics"][v["id"]]) - float(metric["reference"])) / float(metric["reference"])
+                    else:
+                        v["distance"] = "N/A"
+                if v["distance"]=="N/A":
+                    data["status"]="failure"
+                elif float(v["distance"])>float(v["tolerance"]):
+                    data["status"]="failure"
+                v["from_results"] = [{"id" : f["id"],
+                                      "file" : job.format_launch_command(f["file"], loader.config, ajc)}
+                                     for f in validation.get("serve_from_results",[])]
     return data

 #Content
diff --git a/gcvb/job.py b/gcvb/job.py
index 73331ee..fb1355a 100644
--- a/gcvb/job.py
+++ b/gcvb/job.py
@@ -64,7 +64,7 @@ def fill_at_job_creation_task(at_job_creation, task, full_id, config):
     return None

 def fill_at_job_creation_validation(at_job_creation, validation, data_root, ref_data, config, valid):
-    at_job_creation["va_id"]=validation["id"]
+    at_job_creation["va_id"]=validation["Metrics"][0]["id"]
     at_job_creation["va_executable"]=validation["executable"]
     if validation["type"]=="file_comparison":
         #specific values for file comparison
diff --git a/gcvb/model.py b/gcvb/model.py
new file mode 100644
index 0000000..2b5e204
--- /dev/null
+++ b/gcvb/model.py
@@ -0,0 +1,92 @@
+from enum import IntEnum
+class JobStatus(IntEnum):
+    unlinked = -4
+    pending = -3
+    ready = -2
+    running = -1
+
+class AbsoluteMetric:
+    def __init__(self, reference, tolerance, unit = None):
+        self.type = "absolute"
+        self.reference = reference
+        self.tolerance = tolerance
+        self.unit = unit
+    def within_tolerance(self, value):
+        return abs(value - self.reference) <= self.tolerance
+
+class RelativeMetric:
+    def __init__(self, reference, tolerance):
+        self.type = "relative"
+        self.reference = reference
+        self.tolerance = tolerance
+    def within_tolerance(self, value):
+        return (abs(value - self.reference) / self.reference) <= self.tolerance
+
+class Validation:
+    default_type = "relative"
+    default_reference = None
+    def __init__(self, valid_dict, config):
+        self.raw_dict = valid_dict
+        self.status = JobStatus.unlinked
+        self.executable = valid_dict["executable"]
+        self.type = valid_dict["type"]
+        self.launch_command = valid_dict["launch_command"]
+        self.recorded_metrics = {}
+        self.init_metrics(config)
+
+    def init_metrics(self, config):
+        self.expected_metrics = {}
+        for metric in self.raw_dict.get("Metrics", []):
+            t = metric.get("type", self.default_type)
+            if t not in ["relative", "absolute"]:
+                raise ValueError("'type' must be 'relative' or 'absolute'.")
+            #reference is either a dict or a number.
+            ref = metric.get("reference", self.default_reference)
+            if ref is None:
+                raise ValueError("'reference' must be provided.")
+            if isinstance(ref, dict):
+                if config in ref:
+                    if t == "relative":
+                        self.expected_metrics[metric["id"]] = RelativeMetric(ref[config], metric["tolerance"])
+                    else:
+                        self.expected_metrics[metric["id"]] = AbsoluteMetric(ref[config], metric["tolerance"])
+            else:
+                if t == "relative":
+                    self.expected_metrics[metric["id"]] = RelativeMetric(ref, metric["tolerance"])
+                else:
+                    self.expected_metrics[metric["id"]] = AbsoluteMetric(ref, metric["tolerance"])
+
+class FileComparisonValidation(Validation):
+    default_type = "absolute"
+    default_reference = 0
+
+class Task():
+    def __init__(self, task_dict, config):
+        self.raw_dict = task_dict
+        self.status = JobStatus.unlinked
+        self.executable = task_dict["executable"]
+        self.options = task_dict.get("options", '')
+        self.launch_command = task_dict["launch_command"]
+        self.nprocs = task_dict["nprocs"]
+        self.nthreads = task_dict["nthreads"]
+        # Validations
+        self.Validations = []
+        for v in task_dict.get("Validations", []):
+            if v["type"] == "script":
+                self.Validations.append(Validation(v, config))
+            else:
+                self.Validations.append(FileComparisonValidation(v, config))
+
+class Test():
+    def __init__(self, test_dict, config):
+        self.raw_dict = test_dict
+        # Tasks
+        self.Tasks = []
+        for t in test_dict.get("Tasks"):
+            self.Tasks.append(Task(t, config))
+        # Steps
+        self.Steps = []
+        for t in self.Tasks:
+            self.Steps.append(t)
+            for v in t.Validations:
+                self.Steps.append(v)
\ No newline at end of file
diff --git a/gcvb/validation.py b/gcvb/validation.py
index d1a7fee..023b719 100644
--- a/gcvb/validation.py
+++ b/gcvb/validation.py
@@ -11,11 +11,13 @@ def __init__(self, validation_base, run_base, configuration=None):
             self.validation_base[t["id"]]={}
             for ta in t["Tasks"]:
                 for v in ta.get("Validations",[]):
-                    d={}
-                    self.validation_base[t["id"]][v["id"]]=d
-                    for k in ["id","tolerance","type","reference"]:
-                        if k in v:
-                            d[k]=v[k]
+                    for m in v.get("Metrics", []):
+                        d={}
+                        self.validation_base[t["id"]][m["id"]]=d
+                        for k in ["id","tolerance","type","reference"]:
+                            if k in m:
+                                d[k]=m[k]
+                        d["launch_type"] = v["type"]
         self.success={}
         self.failure={}
         self.missing_validations={}
@@ -27,21 +29,23 @@ def __init__(self, validation_base, run_base, configuration=None):
                     if self.status[test_id]!="failure":
                         self.status[test_id]="missing_validation"
                     continue
-                validation_type=valid.setdefault("type","file_comparison")
-                if validation_type=="file_comparison":
+                validation_type=valid.setdefault("type","absolute" if valid["launch_type"]=="file_comparison" else "relative")
+                if validation_type=="absolute":
                     t=float(test[validation_metric])
                     self.__within_tolerance(t,test_id,valid)
-                elif validation_type=="configuration_independent":
-                    rel_change=relative_change(float(test[validation_metric]),float(valid["reference"]))
+                elif validation_type=="relative":
+                    if isinstance(valid["reference"],dict):
+                        if configuration in valid["reference"]:
+                            ref = float(valid["reference"][configuration])
+                        else:
+                            continue
+                    else:
+                        ref = float(valid["reference"])
+                    rel_change=relative_change(float(test[validation_metric]),ref)
                     t=abs(rel_change)
                     self.__within_tolerance(t,test_id,valid)
-                elif validation_type=="configuration_dependent":
-                    if configuration in valid["reference"]:
-                        rel_change=relative_change(float(test[validation_metric]),float(valid["reference"][configuration]))
-                        t=abs(rel_change)
-                        self.__within_tolerance(t,test_id,valid)
                 else:
-                    raise ValueError("Unknown validation type \"%s\". Should be in (file_comparison,configuration_independent,configuration_dependent)" % validation_type)
+                    raise ValueError("Unknown validation type \"%s\". Should be in (absolute, relative)" % validation_type)

     def __within_tolerance(self,test_value,test_id,valid):
         res={"id" : valid["id"], "tolerance" : valid["tolerance"], "distance" : test_value, "type" : valid["type"]}
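
For reference, a minimal usage sketch (not part of the patch) of how the new `Validation` class in `gcvb/model.py` resolves per-metric references into `AbsoluteMetric`/`RelativeMetric` objects. The dictionary contents and the `cluster_A` configuration name below are hypothetical.

```python
from gcvb.model import Validation

# Hypothetical validation description; only the keys read by Validation are filled in.
valid_dict = {
    "executable": "compare.py",
    "type": "script",
    "launch_command": "python compare.py",
    "Metrics": [
        # plain number reference, relative comparison (the default for script validations)
        {"id": "residual", "reference": 1.0e-6, "tolerance": 0.05},
        # configuration-dependent reference, absolute comparison
        {"id": "runtime", "type": "absolute", "reference": {"cluster_A": 120.0}, "tolerance": 10.0},
    ],
}

v = Validation(valid_dict, config="cluster_A")
print(v.expected_metrics["residual"].within_tolerance(1.02e-6))  # True: |1.02e-6 - 1e-6| / 1e-6 = 0.02 <= 0.05
print(v.expected_metrics["runtime"].within_tolerance(125.0))     # True: |125.0 - 120.0| = 5.0 <= 10.0
```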