Commit 88e713f (parent ac0357a)
Showing 3 changed files with 104 additions and 56 deletions.
The changed workflow-configuration module after this commit; first, its `workflow_input` function:

```python
# This module contains functions that are used to configure the input and output of the workflow for the current app,
# and versioning feature that creates a project version before the task starts.
from typing import List, Optional

import supervisely as sly


def workflow_input(
    api: sly.Api,
    project_info: Optional[sly.ProjectInfo] = None,
    session_id: Optional[int] = None,
    team_files_dirs: Optional[List[str]] = None,
):
    if project_info:
        # Create a project version before the task starts
        try:
            project_version_id = api.project.version.create(
                project_info,
                "Evaluator for Model Benchmark",
                f"This backup was created automatically by Supervisely before the Evaluator for Model Benchmark task with ID: {api.task_id}",
            )
        except Exception as e:
            sly.logger.debug(f"Failed to create a project version: {repr(e)}")
            project_version_id = None

        # Add input project to the workflow
        try:
            if project_version_id is None:
                project_version_id = (
                    project_info.version.get("id", None) if project_info.version else None
                )
            api.app.workflow.add_input_project(project_info.id, version_id=project_version_id)
            sly.logger.debug(
                f"Workflow Input: Project ID - {project_info.id}, Project Version ID - {project_version_id}"
            )
        except Exception as e:
            sly.logger.debug(f"Failed to add input to the workflow: {repr(e)}")

        # Add input model session to the workflow
        try:
            api.app.workflow.add_input_task(session_id)
            sly.logger.debug(f"Workflow Input: Session ID - {session_id}")
        except Exception as e:
            sly.logger.debug(f"Failed to add input to the workflow: {repr(e)}")

    if team_files_dirs:
        # Add input evaluation results folders to the workflow
        try:
            for team_files_dir in team_files_dirs:
                api.app.workflow.add_input_folder(team_files_dir)
                sly.logger.debug(f"Workflow Input: Team Files dir - {team_files_dir}")
        except Exception as e:
            sly.logger.debug(f"Failed to add input to the workflow: {repr(e)}")
```
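A minimal usage sketch (not part of this commit): how the app's entry point might call `workflow_input` before starting, assuming the module above is importable as `workflow` and using placeholder IDs and paths.

```python
# Usage sketch with placeholder values; not part of this commit.
import supervisely as sly

import workflow  # assumption: the module above is saved as workflow.py

api = sly.Api.from_env()

# Evaluation run: register the source project and the model session as inputs.
project_info = api.project.get_info_by_id(42)  # placeholder project ID
workflow.workflow_input(api, project_info=project_info, session_id=777)  # placeholder session ID

# Comparison run: register only the evaluation result folders from Team Files.
workflow.workflow_input(
    api,
    team_files_dirs=["/model-benchmark/42_my_project/55_rt-detr/"],  # placeholder dirs
)
```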
The same module's `workflow_output` function, which registers the task's outputs:

```python
def workflow_output(
    api: sly.Api,
    eval_team_files_dir: Optional[str] = None,
    model_benchmark_report: Optional[sly.api.file_api.FileInfo] = None,
    model_comparison_report: Optional[sly.api.file_api.FileInfo] = None,
):
    if model_benchmark_report:
        try:
            # Add output evaluation results folder to the workflow
            eval_dir_relation_settings = sly.WorkflowSettings(title="Evaluation Artifacts")
            eval_dir_meta = sly.WorkflowMeta(relation_settings=eval_dir_relation_settings)
            api.app.workflow.add_output_folder(eval_team_files_dir, meta=eval_dir_meta)
            sly.logger.debug(f"Workflow Output: Team Files dir - {eval_team_files_dir}")

            # Add output model benchmark report to the workflow
            mb_relation_settings = sly.WorkflowSettings(
                title="Model Evaluation",
                icon="assignment",
                icon_color="#dcb0ff",
                icon_bg_color="#faebff",
                url=f"/model-benchmark?id={model_benchmark_report.id}",
                url_title="Open Benchmark Report",
            )
            meta = sly.WorkflowMeta(relation_settings=mb_relation_settings)
            api.app.workflow.add_output_file(model_benchmark_report, meta=meta)
            sly.logger.debug(f"Model Evaluation Report ID - {model_benchmark_report.id}")

        except Exception as e:
            sly.logger.debug(f"Failed to add output to the workflow: {repr(e)}")

    if model_comparison_report:
        try:
            # Add output model comparison report to the workflow
            comparison_relation_settings = sly.WorkflowSettings(
                title="Model Evaluation",
                icon="assignment",
                icon_color="#ffc084",
                icon_bg_color="#fff2e6",
                url=f"/model-benchmark?id={model_comparison_report.id}",
                url_title="Open Comparison Report",
            )
            meta = sly.WorkflowMeta(relation_settings=comparison_relation_settings)
            api.app.workflow.add_output_file(model_comparison_report, meta=meta)
            sly.logger.debug(f"Model Comparison Report ID - {model_comparison_report.id}")

        except Exception as e:
            sly.logger.debug(f"Failed to add output to the workflow: {repr(e)}")
```