
Commit

update UI and Workflow
almazgimaev committed Oct 24, 2024
1 parent ac0357a commit 88e713f
Showing 3 changed files with 104 additions and 56 deletions.
4 changes: 2 additions & 2 deletions src/main.py
@@ -7,11 +7,11 @@
 from src.ui.evaluation import eval_button, evaluation_container, run_evaluation
 
 tabs = widgets.Tabs(
-    labels=["Evaluate models", "Compare models"],
+    labels=["Model Evaluation", "Model Comparison"],
     contents=[evaluation_container, compare_contatiner],
 )
 tabs_card = widgets.Card(
-    title="Model evaluation",
+    title="Model Benchmark",
     content=tabs,
     description="Select the task you want to perform",
 )
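
Side note on how the renamed widgets are used: in a Supervisely app, a card like tabs_card is typically passed as the layout of the application object. A minimal sketch, assuming the standard sly.Application(layout=...) entry point and empty placeholder containers instead of the real ones imported in src/main.py:

import supervisely as sly
import supervisely.app.widgets as widgets

# Placeholders standing in for the containers imported in src/main.py
evaluation_container = widgets.Container(widgets=[])
compare_contatiner = widgets.Container(widgets=[])

tabs = widgets.Tabs(
    labels=["Model Evaluation", "Model Comparison"],  # labels renamed in this commit
    contents=[evaluation_container, compare_contatiner],
)
tabs_card = widgets.Card(
    title="Model Benchmark",  # card title renamed in this commit
    content=tabs,
    description="Select the task you want to perform",
)

# Assumed entry point: serve the card as the app layout
app = sly.Application(layout=tabs_card)
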
21 changes: 18 additions & 3 deletions src/ui/compare.py
@@ -2,30 +2,41 @@
 
 import src.functions as f
 import src.globals as g
+import src.workflow as w
 import supervisely.app.widgets as widgets
 from supervisely._utils import rand_str
 from supervisely.nn.benchmark.comparison.model_comparison import ModelComparison
 
 
 def run_compare(eval_dirs: List[str] = None):
-    work_dir = g.STORAGE_DIR + "/model-comparison-" + rand_str(6)
+    workdir = g.STORAGE_DIR + "/model-comparison-" + rand_str(6)
     team_files_selector.disable()
     models_comparison_report.hide()
     pbar.show()
 
     g.eval_dirs = eval_dirs or team_files_selector.get_selected_paths()
     f.validate_paths(g.eval_dirs)
 
-    comp = ModelComparison(g.api, g.eval_dirs, progress=pbar, output_dir=work_dir)
+    # ==================== Workflow input ====================
+    w.workflow_input(g.api, team_files_dirs=g.eval_dirs)
+    # =======================================================
+
+    comp = ModelComparison(g.api, g.eval_dirs, progress=pbar, workdir=workdir)
     comp.visualize()
     res_dir = f.get_res_dir(g.eval_dirs)
     comp.upload_results(g.team_id, remote_dir=res_dir, progress=pbar)
 
     report = g.api.file.get_info_by_path(g.team_id, comp.get_report_link())
     print(f"REPORT: https://dev.supervisely.com/model-benchmark?id={report.id}")
     g.api.task.set_output_report(g.task_id, report.id, report.name)
 
     models_comparison_report.set(report)
     models_comparison_report.show()
 
+    # ==================== Workflow output ====================
+    w.workflow_output(g.api, model_comparison_report=report)
+    # =======================================================
+
     pbar.hide()
 
     compare_button.loading = False
@@ -40,7 +51,11 @@ def run_compare(eval_dirs: List[str] = None):
 )
 models_comparison_report.hide()
 team_files_selector = widgets.TeamFilesSelector(
-    g.team_id, multiple_selection=True, selection_file_type="folder", max_height=350
+    g.team_id,
+    multiple_selection=True,
+    selection_file_type="folder",
+    max_height=350,
+    initial_folder="/model-benchmark",
 )
 
 compare_contatiner = widgets.Container(
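
The run_compare changes above do three things: switch ModelComparison to the workdir= argument (in place of output_dir=), register the selected evaluation folders as workflow inputs before the comparison is built, and register the uploaded comparison report as a workflow output afterwards. A minimal standalone sketch of that call order, assuming an authenticated sly.Api and existing evaluation folders in Team Files; the folder paths are illustrative and the progress widget passed by the app is omitted:

import supervisely as sly
from supervisely._utils import rand_str
from supervisely.nn.benchmark.comparison.model_comparison import ModelComparison

import src.globals as g
import src.workflow as w

api = sly.Api.from_env()

# Illustrative Team Files folders that hold previously saved evaluation results
eval_dirs = [
    "/model-benchmark/68_project/72_yolov8-l/",
    "/model-benchmark/68_project/75_rt-detr-l/",
]

# New in this commit: register the evaluation folders as workflow inputs
w.workflow_input(api, team_files_dirs=eval_dirs)

# Run the comparison; `workdir=` replaces the old `output_dir=` argument.
# (The app also passes a progress widget via `progress=`; omitted in this sketch.)
workdir = g.STORAGE_DIR + "/model-comparison-" + rand_str(6)
comp = ModelComparison(api, eval_dirs, workdir=workdir)
comp.visualize()
comp.upload_results(g.team_id, remote_dir="/model-comparison/68_project/")

# New in this commit: register the uploaded comparison report as a workflow output
report = api.file.get_info_by_path(g.team_id, comp.get_report_link())
w.workflow_output(api, model_comparison_report=report)
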
135 changes: 84 additions & 51 deletions src/workflow.py
@@ -1,69 +1,102 @@
 # This module contains functions that are used to configure the input and output of the workflow for the current app,
 # and versioning feature that creates a project version before the task starts.
+from typing import List, Optional
 
 import supervisely as sly
 
 
 def workflow_input(
     api: sly.Api,
-    project_info: sly.ProjectInfo,
-    session_id: int,
+    project_info: Optional[sly.ProjectInfo] = None,
+    session_id: Optional[int] = None,
+    team_files_dirs: Optional[List[str]] = None,
 ):
-    # Create a project version before the task starts
-    try:
-        project_version_id = api.project.version.create(
-            project_info,
-            f"Evaluator for Model Benchmark",
-            f"This backup was created automatically by Supervisely before the Evaluator for Model Benchmark task with ID: {api.task_id}",
-        )
-    except Exception as e:
-        sly.logger.debug(f"Failed to create a project version: {repr(e)}")
-        project_version_id = None
+    if project_info:
+        # Create a project version before the task starts
+        try:
+            project_version_id = api.project.version.create(
+                project_info,
+                f"Evaluator for Model Benchmark",
+                f"This backup was created automatically by Supervisely before the Evaluator for Model Benchmark task with ID: {api.task_id}",
+            )
+        except Exception as e:
+            sly.logger.debug(f"Failed to create a project version: {repr(e)}")
+            project_version_id = None
 
-    # Add input project to the workflow
-    try:
-        if project_version_id is None:
-            project_version_id = (
-                project_info.version.get("id", None) if project_info.version else None
+        # Add input project to the workflow
+        try:
+            if project_version_id is None:
+                project_version_id = (
+                    project_info.version.get("id", None) if project_info.version else None
+                )
+            api.app.workflow.add_input_project(project_info.id, version_id=project_version_id)
+            sly.logger.debug(
+                f"Workflow Input: Project ID - {project_info.id}, Project Version ID - {project_version_id}"
             )
-        api.app.workflow.add_input_project(project_info.id, version_id=project_version_id)
-        sly.logger.debug(
-            f"Workflow Input: Project ID - {project_info.id}, Project Version ID - {project_version_id}"
-        )
-    except Exception as e:
-        sly.logger.debug(f"Failed to add input to the workflow: {repr(e)}")
+        except Exception as e:
+            sly.logger.debug(f"Failed to add input to the workflow: {repr(e)}")
 
-    # Add input model session to the workflow
-    try:
-        api.app.workflow.add_input_task(session_id)
-        sly.logger.debug(f"Workflow Input: Session ID - {session_id}")
-    except Exception as e:
-        sly.logger.debug(f"Failed to add input to the workflow: {repr(e)}")
+        # Add input model session to the workflow
+        try:
+            api.app.workflow.add_input_task(session_id)
+            sly.logger.debug(f"Workflow Input: Session ID - {session_id}")
+        except Exception as e:
+            sly.logger.debug(f"Failed to add input to the workflow: {repr(e)}")
 
+    if team_files_dirs:
+        # Add input evaluation results folders to the workflow
+        try:
+            for team_files_dir in team_files_dirs:
+                api.app.workflow.add_input_folder(team_files_dir)
+                sly.logger.debug(f"Workflow Input: Team Files dir - {team_files_dir}")
+        except Exception as e:
+            sly.logger.debug(f"Failed to add input to the workflow: {repr(e)}")
 
+
 def workflow_output(
     api: sly.Api,
-    eval_team_files_dir: str,
-    model_benchmark_report: sly.api.file_api.FileInfo,
+    eval_team_files_dir: Optional[str] = None,
+    model_benchmark_report: Optional[sly.api.file_api.FileInfo] = None,
+    model_comparison_report: Optional[sly.api.file_api.FileInfo] = None,
 ):
-    try:
-        # Add output evaluation results folder to the workflow
-        eval_dir_relation_settings = sly.WorkflowSettings(title="Evaluation Artifacts")
-        eval_dir_meta = sly.WorkflowMeta(relation_settings=eval_dir_relation_settings)
-        api.app.workflow.add_output_folder(eval_team_files_dir, meta=eval_dir_meta)
-        sly.logger.debug(f"Workflow Output: Team Files dir - {eval_team_files_dir}")
+    if model_benchmark_report:
+        try:
+            # Add output evaluation results folder to the workflow
+            eval_dir_relation_settings = sly.WorkflowSettings(title="Evaluation Artifacts")
+            eval_dir_meta = sly.WorkflowMeta(relation_settings=eval_dir_relation_settings)
+            api.app.workflow.add_output_folder(eval_team_files_dir, meta=eval_dir_meta)
+            sly.logger.debug(f"Workflow Output: Team Files dir - {eval_team_files_dir}")
 
-        # Add output model benchmark report to the workflow
-        mb_relation_settings = sly.WorkflowSettings(
-            title="Model Benchmark",
-            icon="assignment",
-            icon_color="#674EA7",
-            icon_bg_color="#CCCCFF",
-            url=f"/model-benchmark?id={model_benchmark_report.id}",
-            url_title="Open Report",
-        )
-        meta = sly.WorkflowMeta(relation_settings=mb_relation_settings)
-        api.app.workflow.add_output_file(model_benchmark_report, meta=meta)
-        sly.logger.debug("Model Benchmark Report ID - {model_benchmark_report.id}")
+            # Add output model benchmark report to the workflow
+            mb_relation_settings = sly.WorkflowSettings(
+                title="Model Evaluation",
+                icon="assignment",
+                icon_color="#dcb0ff",
+                icon_bg_color="#faebff",
+                url=f"/model-benchmark?id={model_benchmark_report.id}",
+                url_title="Open Benchmark Report",
+            )
+            meta = sly.WorkflowMeta(relation_settings=mb_relation_settings)
+            api.app.workflow.add_output_file(model_benchmark_report, meta=meta)
+            sly.logger.debug(f"Model Evaluation Report ID - {model_benchmark_report.id}")
+
+        except Exception as e:
+            sly.logger.debug(f"Failed to add output to the workflow: {repr(e)}")
+
+    if model_comparison_report:
+        try:
+            # Add output model benchmark report to the workflow
+            comparison_relation_settings = sly.WorkflowSettings(
+                title="Model Evaluation",
+                icon="assignment",
+                icon_color="#ffc084",
+                icon_bg_color="#fff2e6",
+                url=f"/model-benchmark?id={model_comparison_report.id}",
+                url_title="Open Comparison Report",
+            )
+            meta = sly.WorkflowMeta(relation_settings=comparison_relation_settings)
+            api.app.workflow.add_output_file(model_comparison_report, meta=meta)
+            sly.logger.debug(f"Model Comparison Report ID - {model_comparison_report.id}")
 
-    except Exception as e:
-        sly.logger.debug(f"Failed to add output to the workflow: {repr(e)}")
+        except Exception as e:
+            sly.logger.debug(f"Failed to add output to the workflow: {repr(e)}")
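
With this refactor every argument of workflow_input and workflow_output is optional, so one module serves both apps: the evaluation flow passes a project (for the automatic version backup) and a model session, while the comparison flow passes only Team Files folders and the comparison report. A minimal sketch of the two call patterns, assuming an authenticated sly.Api; all IDs and paths below are illustrative:

import supervisely as sly

import src.workflow as w

api = sly.Api.from_env()
team_id = 8  # illustrative

# --- Model evaluation flow: project + inference session in, artifacts folder + report out ---
project_info = api.project.get_info_by_id(12345)  # illustrative project ID
w.workflow_input(api, project_info=project_info, session_id=67890)

eval_dir = "/model-benchmark/68_project/72_yolov8-l/"  # illustrative Team Files dir
benchmark_report = api.file.get_info_by_path(team_id, eval_dir + "report.lnk")  # illustrative report file
w.workflow_output(api, eval_team_files_dir=eval_dir, model_benchmark_report=benchmark_report)

# --- Model comparison flow: evaluation folders in, comparison report out ---
w.workflow_input(api, team_files_dirs=[eval_dir, "/model-benchmark/68_project/75_rt-detr-l/"])
comparison_report = api.file.get_info_by_path(team_id, "/model-comparison/68_project/report.lnk")  # illustrative report file
w.workflow_output(api, model_comparison_report=comparison_report)
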
