From 0c2b5afcfa1f8ea2d814c881b218068df9e114ef Mon Sep 17 00:00:00 2001
From: almaz
Date: Mon, 11 Nov 2024 19:50:46 +0100
Subject: [PATCH] update comparison workflow inputs

---
 src/ui/compare.py | 13 ++++++++++++-
 src/workflow.py   | 10 ++++++++++
 2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/src/ui/compare.py b/src/ui/compare.py
index 36861b3..26dd141 100644
--- a/src/ui/compare.py
+++ b/src/ui/compare.py
@@ -45,7 +45,18 @@ def run_compare(eval_dirs: List[str] = None):
     f.validate_paths(g.eval_dirs)

     # ==================== Workflow input ====================
-    w.workflow_input(g.api, team_files_dirs=g.eval_dirs)
+    # /model-benchmark/42912_COCO 2017 instance segmentation/67993_Train YOLOv8 | v9 | v10 | v11/visualizations/template.vue
+    reports = None
+    try:
+        reports_paths = [path.rstrip("/") + "/visualizations/template.vue" for path in g.eval_dirs]
+        reports = [g.api.file.get_info_by_path(g.team_id, path) for path in reports_paths]
+    except Exception as e:
+        sly.logger.warning(f"Failed to get model benchmark reports FileInfos: {repr(e)}")
+
+    if reports is not None:
+        w.workflow_input(g.api, model_benchmark_reports=reports)
+    else:
+        w.workflow_input(g.api, team_files_dirs=g.eval_dirs)
     # =======================================================

     comp = ModelComparison(g.api, g.eval_dirs, progress=comp_pbar, workdir=workdir)
diff --git a/src/workflow.py b/src/workflow.py
index fa32ba8..b123ebb 100644
--- a/src/workflow.py
+++ b/src/workflow.py
@@ -10,6 +10,7 @@ def workflow_input(
     project_info: Optional[sly.ProjectInfo] = None,
     session_id: Optional[int] = None,
     team_files_dirs: Optional[List[str]] = None,
+    model_benchmark_reports: Optional[List[sly.api.file_api.FileInfo]] = None,
 ):
     if project_info:
         # Create a project version before the task starts
@@ -52,6 +53,15 @@ def workflow_input(
     except Exception as e:
         sly.logger.debug(f"Failed to add input to the workflow: {repr(e)}")

+    if model_benchmark_reports:
+        # Add input model benchmark reports to the workflow
+        try:
+            for model_benchmark_report in model_benchmark_reports:
+                api.app.workflow.add_input_file(model_benchmark_report)
+                sly.logger.debug(f"Workflow Input: Model Benchmark Report ID - {model_benchmark_report.id}")
+        except Exception as e:
+            sly.logger.debug(f"Failed to add input to the workflow: {repr(e)}")
+

 def workflow_output(
     api: sly.Api,
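
Usage sketch (illustrative, outside the diff above): how a caller could drive the updated workflow_input, relying only on the Supervisely SDK calls the patch itself uses (api.file.get_info_by_path, workflow_input); the globals module import path, team_id, and eval_dirs values are assumptions for the example.

import supervisely as sly

from src import globals as g   # assumed app globals module exposing api, team_id, eval_dirs
from src import workflow as w  # the module patched above

# Resolve each evaluation directory to its benchmark report template.vue,
# mirroring the lookup added in src/ui/compare.py.
report_paths = [d.rstrip("/") + "/visualizations/template.vue" for d in g.eval_dirs]
reports = [g.api.file.get_info_by_path(g.team_id, p) for p in report_paths]

# get_info_by_path may yield None for a missing file, so only register the
# report FileInfos when every directory produced one; otherwise fall back
# to registering the team files directories, as the patched UI code does.
if reports and all(info is not None for info in reports):
    w.workflow_input(g.api, model_benchmark_reports=reports)
else:
    w.workflow_input(g.api, team_files_dirs=g.eval_dirs)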