Upgrade SDK to 6.73.255. Support comparison for semantic segmentation task type #15

Merged · 6 commits · Dec 16, 2024
4 changes: 2 additions & 2 deletions config.json
@@ -11,6 +11,6 @@
"task_location": "workspace_tasks",
"entrypoint": "python -m uvicorn src.main:app --host 0.0.0.0 --port 8000",
"port": 8000,
"docker_image": "supervisely/model-benchmark:1.0.16",
"instance_version": "6.12.5"
"docker_image": "supervisely/model-benchmark:1.0.17",
"instance_version": "6.12.12"
}
2 changes: 1 addition & 1 deletion dev_requirements.txt
@@ -1,5 +1,5 @@
# git+https://github.com/supervisely/supervisely.git@model-benchmark
-supervisely[model-benchmark]==6.73.239
+supervisely[model-benchmark]==6.73.255

# torch==1.13.0
# torchvision==0.14.0
4 changes: 2 additions & 2 deletions docker/Dockerfile
@@ -1,5 +1,5 @@
FROM supervisely/base-py-sdk:6.73.208

-RUN python3 -m pip install supervisely[model-benchmark]==6.73.239
+RUN python3 -m pip install supervisely[model-benchmark]==6.73.255

-LABEL python_sdk_version=6.73.239
+LABEL python_sdk_version=6.73.255
2 changes: 1 addition & 1 deletion local.env
@@ -3,5 +3,5 @@ WORKSPACE_ID = 680
# PROJECT_ID = 41021
SLY_APP_DATA_DIR = "APP_DATA"

-TASK_ID = 68257
+TASK_ID = 68088
# modal.state.sessionId=66693
5 changes: 4 additions & 1 deletion src/main.py
@@ -41,7 +41,10 @@ async def evaluate(request: Request):
req = await request.json()
try:
state = req["state"]
return {"data": run_evaluation(state["session_id"], state["project_id"])}
session_id = state["session_id"]
project_id = state["project_id"]
dataset_ids = state.get("dataset_ids", None)
return {"data": run_evaluation(session_id, project_id, dataset_ids=dataset_ids)}
except Exception as e:
sly.logger.error(f"Error during model evaluation: {e}")
return {"error": str(e)}
8 changes: 4 additions & 4 deletions src/ui/compare.py
@@ -63,19 +63,19 @@ def run_compare(eval_dirs: List[str] = None):
res_dir = f.get_res_dir(g.eval_dirs)
res_dir = comp.upload_results(g.team_id, remote_dir=res_dir, progress=comp_pbar)

-    report = g.api.file.get_info_by_path(g.team_id, comp.get_report_link())
-    g.api.task.set_output_report(g.task_id, report.id, report.name)
+    g.api.task.set_output_report(g.task_id, comp.lnk.id, comp.lnk.name, "Click to open the report")

-    models_comparison_report.set(report)
+    models_comparison_report.set(comp.report)
models_comparison_report.show()

# ==================== Workflow output ====================
-    w.workflow_output(g.api, model_comparison_report=report)
+    w.workflow_output(g.api, model_comparison_report=comp.report)
# =======================================================

comp_pbar.hide()
compare_button.loading = False

sly.logger.info(f"Model comparison report uploaded to: {res_dir}")
sly.logger.info(f"Report link: {comp.get_report_link()}")

return res_dir
143 changes: 71 additions & 72 deletions src/ui/evaluation.py
@@ -1,4 +1,4 @@
-from typing import Dict, Optional, Union
+from typing import Dict, Optional, Tuple, Union

import yaml

@@ -20,6 +20,14 @@
SlyTqdm,
Text,
)
+from supervisely.nn.benchmark import (
+    InstanceSegmentationBenchmark,
+    InstanceSegmentationEvaluator,
+    ObjectDetectionBenchmark,
+    ObjectDetectionEvaluator,
+    SemanticSegmentationBenchmark,
+    SemanticSegmentationEvaluator,
+)
from supervisely.nn.inference.session import SessionJSON

no_classes_label = Text(
@@ -78,31 +86,52 @@
]
)

+benchmark_cls_type = Union[
+    ObjectDetectionBenchmark, InstanceSegmentationBenchmark, SemanticSegmentationBenchmark
+]
+
+evaluator_cls_type = Union[
+    ObjectDetectionEvaluator, InstanceSegmentationEvaluator, SemanticSegmentationEvaluator
+]


+def get_benchmark_and_evaluator_classes(
+    task_type: sly.nn.TaskType,
+) -> Tuple[benchmark_cls_type, evaluator_cls_type]:
+    if task_type == sly.nn.TaskType.OBJECT_DETECTION:
+        return ObjectDetectionBenchmark, ObjectDetectionEvaluator
+    elif task_type == sly.nn.TaskType.INSTANCE_SEGMENTATION:
+        return (InstanceSegmentationBenchmark, InstanceSegmentationEvaluator)
+    elif task_type == sly.nn.TaskType.SEMANTIC_SEGMENTATION:
+        return (SemanticSegmentationBenchmark, SemanticSegmentationEvaluator)
+    else:
+        raise ValueError(f"Unknown task type: {task_type}")


@f.with_clean_up_progress(eval_pbar)
def run_evaluation(
session_id: Optional[int] = None,
project_id: Optional[int] = None,
params: Optional[Union[str, Dict]] = None,
+    dataset_ids: Optional[Tuple[int]] = None,
):
work_dir = g.STORAGE_DIR + "/benchmark_" + sly.rand_str(6)

-    if session_id is not None:
-        g.session_id = session_id
-    if project_id is not None:
-        g.project_id = project_id
+    g.session_id = session_id or g.session_id
+    g.project_id = project_id or g.project_id

project = g.api.project.get_info_by_id(g.project_id)
if g.session is None:
g.session = SessionJSON(g.api, g.session_id)
task_type = g.session.get_deploy_info()["task_type"]

-    if all_datasets_checkbox.is_checked():
-        dataset_ids = None
-    else:
-        dataset_ids = sel_dataset.get_selected_ids()
-        if len(dataset_ids) == 0:
-            raise ValueError("No datasets selected")
+    if dataset_ids is None:
+        if all_datasets_checkbox.is_checked():
+            dataset_ids = None
+        else:
+            dataset_ids = sel_dataset.get_selected_ids()
+            if len(dataset_ids) == 0:
+                raise ValueError("No datasets selected")

# ==================== Workflow input ====================
w.workflow_input(g.api, project, g.session_id)
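A quick, illustrative sanity check of the dispatch helper added above; it assumes get_benchmark_and_evaluator_classes is in scope (e.g. imported from this module inside the running app) and uses only classes that this diff imports from the SDK.

import supervisely as sly
from supervisely.nn.benchmark import (
    SemanticSegmentationBenchmark,
    SemanticSegmentationEvaluator,
)

# get_benchmark_and_evaluator_classes is the helper defined in the hunk above.
bm_cls, evaluator_cls = get_benchmark_and_evaluator_classes(sly.nn.TaskType.SEMANTIC_SEGMENTATION)
assert bm_cls is SemanticSegmentationBenchmark
assert evaluator_cls is SemanticSegmentationEvaluator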
@@ -119,48 +148,23 @@ def run_evaluation(

params = eval_params.get_value() or params
if isinstance(params, str):
sly.Annotation.filter_labels_by_classes
params = yaml.safe_load(params)

-    if task_type == sly.nn.TaskType.OBJECT_DETECTION:
-        if params is None:
-            params = sly.nn.benchmark.ObjectDetectionEvaluator.load_yaml_evaluation_params()
-            params = yaml.safe_load(params)
-        bm = sly.nn.benchmark.ObjectDetectionBenchmark(
-            g.api,
-            project.id,
-            gt_dataset_ids=dataset_ids,
-            output_dir=work_dir,
-            progress=eval_pbar,
-            progress_secondary=sec_eval_pbar,
-            classes_whitelist=g.selected_classes,
-            evaluation_params=params,
-        )
-    elif task_type == sly.nn.TaskType.INSTANCE_SEGMENTATION:
-        if params is None:
-            params = sly.nn.benchmark.InstanceSegmentationEvaluator.load_yaml_evaluation_params()
-            params = yaml.safe_load(params)
-        bm = sly.nn.benchmark.InstanceSegmentationBenchmark(
-            g.api,
-            project.id,
-            gt_dataset_ids=dataset_ids,
-            output_dir=work_dir,
-            progress=eval_pbar,
-            progress_secondary=sec_eval_pbar,
-            classes_whitelist=g.selected_classes,
-            evaluation_params=params,
-        )
-    elif task_type == sly.nn.TaskType.SEMANTIC_SEGMENTATION:
-        params = sly.nn.benchmark.SemanticSegmentationEvaluator.load_yaml_evaluation_params()
-        bm = sly.nn.benchmark.SemanticSegmentationBenchmark(
-            g.api,
-            project.id,
-            gt_dataset_ids=dataset_ids,
-            output_dir=work_dir,
-            progress=eval_pbar,
-            progress_secondary=sec_eval_pbar,
-            classes_whitelist=g.selected_classes,
-            evaluation_params=params,
-        )
+    bm_cls, evaluator_cls = get_benchmark_and_evaluator_classes(task_type)
+    if params is None:
+        params = evaluator_cls.load_yaml_evaluation_params()
+        params = yaml.safe_load(params)
+    bm: benchmark_cls_type = bm_cls(
+        g.api,
+        project.id,
+        gt_dataset_ids=dataset_ids,
+        output_dir=work_dir,
+        progress=eval_pbar,
+        progress_secondary=sec_eval_pbar,
+        classes_whitelist=g.selected_classes,
+        evaluation_params=params,
+    )

bm.evaluator_app_info = g.api.task.get_info_by_id(g.task_id)
sly.logger.info(f"{g.session_id = }")
@@ -197,30 +201,21 @@ def run_evaluation(
bm.visualize()

bm.upload_eval_results(res_dir + "/evaluation/")
-    remote_dir = bm.upload_visualizations(res_dir + "/visualizations/")

-    report = bm.upload_report_link(remote_dir)
-    g.api.task.set_output_report(g.task_id, report.id, report.name)
+    bm.upload_visualizations(res_dir + "/visualizations/")

-    template_vis_file = g.api.file.get_info_by_path(
-        sly.env.team_id(), res_dir + "/visualizations/template.vue"
-    )
-    report_model_benchmark.set(template_vis_file)
+    g.api.task.set_output_report(g.task_id, bm.lnk.id, bm.lnk.name, "Click to open the report")
+    report_model_benchmark.set(bm.report)
report_model_benchmark.show()
eval_pbar.hide()

# ==================== Workflow output ====================
-    w.workflow_output(g.api, res_dir, template_vis_file)
+    w.workflow_output(g.api, res_dir, bm.report)
# =======================================================

sly.logger.info(
f"Predictions project: "
f" name {bm.dt_project_info.name}, "
f" workspace_id {bm.dt_project_info.workspace_id}. "
# f"Differences project: "
# f" name {bm.diff_project_info.name}, "
# f" workspace_id {bm.diff_project_info.workspace_id}"
f"Predictions project {bm.dt_project_info.name}, workspace ID: {bm.dt_project_info.workspace_id}."
)
sly.logger.info(f"Report link: {bm.get_report_link()}")

eval_button.loading = False

@@ -246,17 +241,21 @@ def set_selected_classes_and_show_info():


def update_eval_params():
-    if g.session is None:
-        g.session = SessionJSON(g.api, g.session_id)
+    g.session = SessionJSON(g.api, g.session_id)
task_type = g.session.get_deploy_info()["task_type"]
if task_type == sly.nn.TaskType.OBJECT_DETECTION:
-        params = sly.nn.benchmark.ObjectDetectionEvaluator.load_yaml_evaluation_params()
+        params = ObjectDetectionEvaluator.load_yaml_evaluation_params()
elif task_type == sly.nn.TaskType.INSTANCE_SEGMENTATION:
-        params = sly.nn.benchmark.InstanceSegmentationEvaluator.load_yaml_evaluation_params()
+        params = InstanceSegmentationEvaluator.load_yaml_evaluation_params()
elif task_type == sly.nn.TaskType.SEMANTIC_SEGMENTATION:
params = "# Semantic Segmentation evaluation parameters are not available yet."
params = ""
eval_params.set_text(params, language_mode="yaml")
-    eval_params_card.uncollapse()

+    if task_type == sly.nn.TaskType.SEMANTIC_SEGMENTATION:
+        eval_params_card.hide()
+    else:
+        eval_params_card.show()
+        eval_params_card.uncollapse()


def handle_selectors(active: bool):