diff --git a/src/ui/compare.py b/src/ui/compare.py
index fb69380..95ab9e8 100644
--- a/src/ui/compare.py
+++ b/src/ui/compare.py
@@ -45,17 +45,17 @@ def run_compare(eval_dirs: List[str] = None):
     f.validate_paths(g.eval_dirs)
 
     # ==================== Workflow input ====================
-    reports = None
-    try:
-        reports_paths = [path.rstrip("/") + "/visualizations/template.vue" for path in g.eval_dirs]
-        reports = [g.api.file.get_info_by_path(g.team_id, path) for path in reports_paths]
-    except Exception as e:
-        sly.logger.warning(f"Failed to get model benchmark reports FileInfos: {repr(e)}")
+    # reports = None
+    # try:
+    #     reports_paths = [path.rstrip("/") + "/visualizations/template.vue" for path in g.eval_dirs]
+    #     reports = [g.api.file.get_info_by_path(g.team_id, path) for path in reports_paths]
+    # except Exception as e:
+    #     sly.logger.warning(f"Failed to get model benchmark reports FileInfos: {repr(e)}")
 
-    if reports is not None:
-        w.workflow_input(g.api, model_benchmark_reports=reports)
-    else:
-        w.workflow_input(g.api, team_files_dirs=g.eval_dirs)
+    # if reports is not None:
+    #     w.workflow_input(g.api, model_benchmark_reports=reports)
+    # else:
+    #     w.workflow_input(g.api, team_files_dirs=g.eval_dirs)
     # =======================================================
 
     comp = ModelComparison(g.api, g.eval_dirs, progress=comp_pbar, workdir=workdir)
@@ -70,7 +70,13 @@ def run_compare(eval_dirs: List[str] = None):
     models_comparison_report.show()
 
     # ==================== Workflow output ====================
-    w.workflow_output(g.api, model_comparison_report=report)
+    reports = []
+    try:
+        reports_paths = [path.rstrip("/") + "/visualizations/template.vue" for path in g.eval_dirs]
+        reports = [g.api.file.get_info_by_path(g.team_id, path) for path in reports_paths]
+    except Exception as e:
+        sly.logger.warning(f"Failed to get model benchmark reports FileInfos: {repr(e)}")
+    w.workflow_output(g.api, input_benchmark_reports=reports, model_comparison_report=report)
     # =======================================================
 
     comp_pbar.hide()
diff --git a/src/ui/evaluation.py b/src/ui/evaluation.py
index 910dd06..1d85094 100644
--- a/src/ui/evaluation.py
+++ b/src/ui/evaluation.py
@@ -194,7 +194,7 @@ def run_evaluation(
     eval_pbar.hide()
 
     # ==================== Workflow output ====================
-    w.workflow_output(g.api, res_dir, template_vis_file)
+    w.workflow_output(g.api, model_benchmark_report=template_vis_file)
     # =======================================================
 
     sly.logger.info(
diff --git a/src/workflow.py b/src/workflow.py
index b123ebb..1b75784 100644
--- a/src/workflow.py
+++ b/src/workflow.py
@@ -58,28 +58,24 @@ def workflow_input(
     try:
         for model_benchmark_report in model_benchmark_reports:
             api.app.workflow.add_input_file(model_benchmark_report)
-            sly.logger.debug(f"Workflow Input: Model Benchmark Report ID - {model_benchmark_report.id}")
+            sly.logger.debug(
+                f"Workflow Input: Model Benchmark Report ID - {model_benchmark_report.id}"
+            )
     except Exception as e:
         sly.logger.debug(f"Failed to add input to the workflow: {repr(e)}")
 
 
 def workflow_output(
     api: sly.Api,
-    eval_team_files_dir: Optional[str] = None,
     model_benchmark_report: Optional[sly.api.file_api.FileInfo] = None,
+    input_benchmark_reports: Optional[List[sly.api.file_api.FileInfo]] = None,
     model_comparison_report: Optional[sly.api.file_api.FileInfo] = None,
 ):
     if model_benchmark_report:
         try:
-            # Add output evaluation results folder to the workflow
-            eval_dir_relation_settings = sly.WorkflowSettings(title="Evaluation Artifacts")
-            eval_dir_meta = sly.WorkflowMeta(relation_settings=eval_dir_relation_settings)
-            api.app.workflow.add_output_folder(eval_team_files_dir, meta=eval_dir_meta)
-            sly.logger.debug(f"Workflow Output: Team Files dir - {eval_team_files_dir}")
-
             # Add output model benchmark report to the workflow
             mb_relation_settings = sly.WorkflowSettings(
-                title="Model Evaluation",
+                title="Model Benchmark",
                 icon="assignment",
                 icon_color="#dcb0ff",
                 icon_bg_color="#faebff",
@@ -96,17 +92,17 @@ def workflow_output(
     if model_comparison_report:
         try:
             # Add output model benchmark report to the workflow
-            comparison_relation_settings = sly.WorkflowSettings(
-                title="Model Evaluation",
+            relation_settings = sly.WorkflowSettings(
+                title="Model Comparison",
                 icon="assignment",
                 icon_color="#ffc084",
                 icon_bg_color="#fff2e6",
                 url=f"/model-benchmark?id={model_comparison_report.id}",
                 url_title="Open Comparison Report",
             )
-            meta = sly.WorkflowMeta(relation_settings=comparison_relation_settings)
-            api.app.workflow.add_output_file(model_comparison_report, meta=meta)
-            sly.logger.debug(f"Model Comparison Report ID - {model_comparison_report.id}")
-
+            meta = sly.WorkflowMeta(node_settings=relation_settings)
+            for model_benchmark_report in input_benchmark_reports:
+                api.app.workflow.add_input_file(model_benchmark_report, meta=meta)
+                sly.logger.debug(f"Model Benchmark Report ID - {model_benchmark_report.id}")
         except Exception as e:
            sly.logger.debug(f"Failed to add output to the workflow: {repr(e)}")
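
Note (not part of the patch): a minimal sketch of the call pattern this diff moves to, i.e. resolving the benchmark-report FileInfos just before the output step and passing them via the new input_benchmark_reports argument. The team_id and directory values and the `src.workflow` import path are placeholders/assumptions for illustration; only the keyword arguments mirror the call sites changed in compare.py and evaluation.py.

    from typing import List, Optional

    import supervisely as sly

    import src.workflow as w  # assumption: import path of the patched module


    def collect_report_infos(
        api: sly.Api, team_id: int, eval_dirs: List[str]
    ) -> List[Optional[sly.api.file_api.FileInfo]]:
        # Mirrors the lookup added to compare.py: each evaluation dir is expected
        # to contain visualizations/template.vue, whose FileInfo identifies the report.
        paths = [d.rstrip("/") + "/visualizations/template.vue" for d in eval_dirs]
        return [api.file.get_info_by_path(team_id, p) for p in paths]


    if __name__ == "__main__":
        api = sly.Api.from_env()
        team_id = 1  # placeholder
        eval_dirs = ["/model-benchmark/run_1", "/model-benchmark/run_2"]  # placeholders

        reports = collect_report_infos(api, team_id, eval_dirs)
        comparison_report = api.file.get_info_by_path(
            team_id, "/model-comparison/result/visualizations/template.vue"  # placeholder
        )

        # Comparison app: the collected report FileInfos are passed alongside the
        # comparison report, matching the updated compare.py call site.
        w.workflow_output(
            api,
            input_benchmark_reports=reports,
            model_comparison_report=comparison_report,
        )

        # Evaluation app: only the benchmark report itself is registered, e.g.
        # w.workflow_output(api, model_benchmark_report=reports[0])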