Commit: update workflows

almazgimaev committed Nov 12, 2024
1 parent 9289127 · commit 77718d1

Showing 3 changed files with 29 additions and 27 deletions.

src/ui/compare.py · 28 changes: 17 additions & 11 deletions
@@ -45,17 +45,17 @@ def run_compare(eval_dirs: List[str] = None):
     f.validate_paths(g.eval_dirs)
 
     # ==================== Workflow input ====================
-    reports = None
-    try:
-        reports_paths = [path.rstrip("/") + "/visualizations/template.vue" for path in g.eval_dirs]
-        reports = [g.api.file.get_info_by_path(g.team_id, path) for path in reports_paths]
-    except Exception as e:
-        sly.logger.warning(f"Failed to get model benchmark reports FileInfos: {repr(e)}")
+    # reports = None
+    # try:
+    #     reports_paths = [path.rstrip("/") + "/visualizations/template.vue" for path in g.eval_dirs]
+    #     reports = [g.api.file.get_info_by_path(g.team_id, path) for path in reports_paths]
+    # except Exception as e:
+    #     sly.logger.warning(f"Failed to get model benchmark reports FileInfos: {repr(e)}")
 
-    if reports is not None:
-        w.workflow_input(g.api, model_benchmark_reports=reports)
-    else:
-        w.workflow_input(g.api, team_files_dirs=g.eval_dirs)
+    # if reports is not None:
+    #     w.workflow_input(g.api, model_benchmark_reports=reports)
+    # else:
+    #     w.workflow_input(g.api, team_files_dirs=g.eval_dirs)
     # =======================================================
 
     comp = ModelComparison(g.api, g.eval_dirs, progress=comp_pbar, workdir=workdir)
@@ -70,7 +70,13 @@ def run_compare(eval_dirs: List[str] = None):
     models_comparison_report.show()
 
     # ==================== Workflow output ====================
-    w.workflow_output(g.api, model_comparison_report=report)
+    reports = []
+    try:
+        reports_paths = [path.rstrip("/") + "/visualizations/template.vue" for path in g.eval_dirs]
+        reports = [g.api.file.get_info_by_path(g.team_id, path) for path in reports_paths]
+    except Exception as e:
+        sly.logger.warning(f"Failed to get model benchmark reports FileInfos: {repr(e)}")
+    w.workflow_output(g.api, input_benchmark_reports=reports, model_comparison_report=report)
     # =======================================================
 
     comp_pbar.hide()
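
Net effect in src/ui/compare.py: the workflow-input block is kept as comments rather than deleted, and the same report lookup now runs immediately before workflow_output, so the comparison node gets wired to the benchmark reports it consumed. A minimal standalone sketch of that lookup, assuming an initialized sly.Api, a hypothetical team ID, and evaluation directories in Team Files that each contain visualizations/template.vue:

import supervisely as sly

api = sly.Api.from_env()  # reads SERVER_ADDRESS and API_TOKEN from the environment
team_id = 42  # hypothetical team ID
eval_dirs = ["/model-benchmark/001_eval", "/model-benchmark/002_eval"]  # hypothetical dirs

reports = []
try:
    # Each evaluation dir is expected to hold its report at visualizations/template.vue
    report_paths = [d.rstrip("/") + "/visualizations/template.vue" for d in eval_dirs]
    # get_info_by_path returns a FileInfo (or None if the path does not exist)
    reports = [api.file.get_info_by_path(team_id, p) for p in report_paths]
except Exception as e:
    sly.logger.warning(f"Failed to get model benchmark reports FileInfos: {repr(e)}")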

src/ui/evaluation.py · 2 changes: 1 addition & 1 deletion
@@ -194,7 +194,7 @@ def run_evaluation(
     eval_pbar.hide()
 
     # ==================== Workflow output ====================
-    w.workflow_output(g.api, res_dir, template_vis_file)
+    w.workflow_output(g.api, model_benchmark_report=template_vis_file)
     # =======================================================
 
     sly.logger.info(
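
The one-line change above follows from the new signature in src/workflow.py below: eval_team_files_dir is gone, so the old positional call w.workflow_output(g.api, res_dir, template_vis_file) would now silently bind res_dir to model_benchmark_report and template_vis_file to input_benchmark_reports. A tiny sketch of the binding hazard, using a hypothetical stub that mirrors the new parameter order:

# Hypothetical stub mirroring the new signature of workflow_output.
def workflow_output(api, model_benchmark_report=None, input_benchmark_reports=None,
                    model_comparison_report=None):
    print("report:", model_benchmark_report, "| inputs:", input_benchmark_reports)

workflow_output("api", "res_dir", "template_vis_file")
# -> report: res_dir | inputs: template_vis_file   (wrong bindings)
workflow_output("api", model_benchmark_report="template_vis_file")
# -> report: template_vis_file | inputs: None      (explicit keyword, as in the diff)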

src/workflow.py · 26 changes: 11 additions & 15 deletions
@@ -58,28 +58,24 @@ def workflow_input(
     try:
         for model_benchmark_report in model_benchmark_reports:
             api.app.workflow.add_input_file(model_benchmark_report)
-            sly.logger.debug(f"Workflow Input: Model Benchmark Report ID - {model_benchmark_report.id}")
+            sly.logger.debug(
+                f"Workflow Input: Model Benchmark Report ID - {model_benchmark_report.id}"
+            )
     except Exception as e:
         sly.logger.debug(f"Failed to add input to the workflow: {repr(e)}")
 
 
 def workflow_output(
     api: sly.Api,
-    eval_team_files_dir: Optional[str] = None,
     model_benchmark_report: Optional[sly.api.file_api.FileInfo] = None,
+    input_benchmark_reports: Optional[List[sly.api.file_api.FileInfo]] = None,
     model_comparison_report: Optional[sly.api.file_api.FileInfo] = None,
 ):
     if model_benchmark_report:
         try:
-            # Add output evaluation results folder to the workflow
-            eval_dir_relation_settings = sly.WorkflowSettings(title="Evaluation Artifacts")
-            eval_dir_meta = sly.WorkflowMeta(relation_settings=eval_dir_relation_settings)
-            api.app.workflow.add_output_folder(eval_team_files_dir, meta=eval_dir_meta)
-            sly.logger.debug(f"Workflow Output: Team Files dir - {eval_team_files_dir}")
-
             # Add output model benchmark report to the workflow
             mb_relation_settings = sly.WorkflowSettings(
-                title="Model Evaluation",
+                title="Model Benchmark",
                 icon="assignment",
                 icon_color="#dcb0ff",
                 icon_bg_color="#faebff",
@@ -96,17 +92,17 @@ def workflow_output(
     if model_comparison_report:
         try:
             # Add output model benchmark report to the workflow
-            comparison_relation_settings = sly.WorkflowSettings(
-                title="Model Evaluation",
+            relation_settings = sly.WorkflowSettings(
+                title="Model Comparison",
                 icon="assignment",
                 icon_color="#ffc084",
                 icon_bg_color="#fff2e6",
                 url=f"/model-benchmark?id={model_comparison_report.id}",
                 url_title="Open Comparison Report",
             )
-            meta = sly.WorkflowMeta(relation_settings=comparison_relation_settings)
-            api.app.workflow.add_output_file(model_comparison_report, meta=meta)
-            sly.logger.debug(f"Model Comparison Report ID - {model_comparison_report.id}")
-
+            meta = sly.WorkflowMeta(node_settings=relation_settings)
+            for model_benchmark_report in input_benchmark_reports:
+                api.app.workflow.add_input_file(model_benchmark_report, meta=meta)
+                sly.logger.debug(f"Model Benchmark Report ID - {model_benchmark_report.id}")
     except Exception as e:
         sly.logger.debug(f"Failed to add output to the workflow: {repr(e)}")
