Upgrade SDK version to 6.73.239. Support for Semantic Segmentation. Bug fixes (#14)
almazgimaev authored Nov 28, 2024
1 parent 1188b87 commit 49c41ea
Showing 7 changed files with 85 additions and 61 deletions.
4 changes: 2 additions & 2 deletions config.json
@@ -11,6 +11,6 @@
"task_location": "workspace_tasks",
"entrypoint": "python -m uvicorn src.main:app --host 0.0.0.0 --port 8000",
"port": 8000,
"docker_image": "supervisely/model-benchmark:1.0.15",
"instance_version": "6.11.19"
"docker_image": "supervisely/model-benchmark:1.0.16",
"instance_version": "6.12.5"
}
8 changes: 7 additions & 1 deletion dev_requirements.txt
@@ -1,2 +1,8 @@
# git+https://github.com/supervisely/supervisely.git@model-benchmark
-supervisely[model-benchmark]==6.73.215
+supervisely[model-benchmark]==6.73.239
+
+# torch==1.13.0
+# torchvision==0.14.0
+# torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113
+# torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113
+# cupy-cuda111
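A quick sanity check for the new pin, as a minimal sketch (assumes the requirement above has been installed into the current environment):

# Hedged sketch: verify the installed SDK matches the pin in dev_requirements.txt.
from importlib.metadata import version

installed = version("supervisely")
assert installed == "6.73.239", f"expected 6.73.239, found {installed}"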
4 changes: 2 additions & 2 deletions docker/Dockerfile
@@ -1,5 +1,5 @@
FROM supervisely/base-py-sdk:6.73.208

-RUN python3 -m pip install supervisely[model-benchmark]==6.73.215
+RUN python3 -m pip install supervisely[model-benchmark]==6.73.239

-LABEL python_sdk_version=6.73.215
+LABEL python_sdk_version=6.73.239
2 changes: 1 addition & 1 deletion local.env
@@ -3,5 +3,5 @@ WORKSPACE_ID = 680
# PROJECT_ID = 41021
SLY_APP_DATA_DIR = "APP_DATA"

-TASK_ID = 60447
+TASK_ID = 68257
# modal.state.sessionId=66693
2 changes: 2 additions & 0 deletions src/functions.py
@@ -9,6 +9,7 @@
geometry_to_task_type = {
    TaskType.OBJECT_DETECTION: [sly.Rectangle, sly.AnyGeometry],
    TaskType.INSTANCE_SEGMENTATION: [sly.Bitmap, sly.Polygon, sly.AnyGeometry],
+    TaskType.SEMANTIC_SEGMENTATION: [sly.Bitmap, sly.Polygon, sly.AnyGeometry],
}


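The new SEMANTIC_SEGMENTATION entry reuses the instance-segmentation geometries. A minimal sketch of how such a mapping can gate project classes by the deployed model's task type; matching_classes is a hypothetical helper for illustration, not code from this app:

import supervisely as sly
from supervisely.nn import TaskType

# Same mapping as in the hunk above: allowed label geometries per task type.
geometry_to_task_type = {
    TaskType.OBJECT_DETECTION: [sly.Rectangle, sly.AnyGeometry],
    TaskType.INSTANCE_SEGMENTATION: [sly.Bitmap, sly.Polygon, sly.AnyGeometry],
    TaskType.SEMANTIC_SEGMENTATION: [sly.Bitmap, sly.Polygon, sly.AnyGeometry],
}

def matching_classes(meta: sly.ProjectMeta, task_type: str) -> list:
    # Keep only classes whose geometry is valid for the given task type.
    allowed = geometry_to_task_type.get(task_type, [])
    return [obj_cls for obj_cls in meta.obj_classes if obj_cls.geometry_type in allowed]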
@@ -98,6 +99,7 @@ def wrapper(*args, **kwargs):
        finally:
            with pbar(message="Application is started ...", total=1) as pb:
                pb.update(1)
+            pbar.hide()

    return wrapper

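For context, the pbar.hide() fix lands inside a startup decorator, so the progress bar is cleaned up even when the wrapped call raises. A sketch of the surrounding pattern under stated assumptions: only the finally block is taken from the diff, and the decorator name is hypothetical:

import functools

from supervisely.app.widgets import SlyTqdm

pbar = SlyTqdm()  # assumption: the module-level progress widget used by the app

def startup_progress(func):  # hypothetical name for the decorator in functions.py
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        finally:
            # Tick the "started" progress bar to completion, then hide it
            # (the pbar.hide() call is the fix added in this commit).
            with pbar(message="Application is started ...", total=1) as pb:
                pb.update(1)
            pbar.hide()

    return wrapper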
122 changes: 69 additions & 53 deletions src/ui/evaluation.py
@@ -6,59 +6,59 @@
import src.globals as g
import src.workflow as w
import supervisely as sly
-import supervisely.app.widgets as widgets
-from supervisely._utils import rand_str
-from supervisely.nn import TaskType
-from supervisely.nn.benchmark import (
-    InstanceSegmentationBenchmark,
-    ObjectDetectionBenchmark,
-)
-from supervisely.nn.benchmark.evaluation.instance_segmentation_evaluator import (
-    InstanceSegmentationEvaluator,
-)
-from supervisely.nn.benchmark.evaluation.object_detection_evaluator import (
-    ObjectDetectionEvaluator,
+from supervisely.app.widgets import (
+    Button,
+    Card,
+    Checkbox,
+    Container,
+    Editor,
+    Progress,
+    ReportThumbnail,
+    SelectAppSession,
+    SelectDataset,
+    SelectProject,
+    SlyTqdm,
+    Text,
)
from supervisely.nn.inference.session import SessionJSON


-no_classes_label = widgets.Text(
+no_classes_label = Text(
    "Not found any classes in the project that are present in the model", status="error"
)
no_classes_label.hide()
-total_classes_text = widgets.Text(status="info")
-selected_matched_text = widgets.Text(status="success")
-not_matched_text = widgets.Text(status="warning")
+total_classes_text = Text(status="info")
+selected_matched_text = Text(status="success")
+not_matched_text = Text(status="warning")

-sel_app_session = widgets.SelectAppSession(g.team_id, tags=g.deployed_nn_tags, show_label=True)
-sel_project = widgets.SelectProject(default_id=None, workspace_id=g.workspace_id)
-sel_dataset = widgets.SelectDataset(multiselect=True, compact=True)
+sel_app_session = SelectAppSession(g.team_id, tags=g.deployed_nn_tags, show_label=True)
+sel_project = SelectProject(default_id=None, workspace_id=g.workspace_id)
+sel_dataset = SelectDataset(multiselect=True, compact=True)
sel_dataset.hide()
-all_datasets_checkbox = widgets.Checkbox("All datasets", checked=True)
+all_datasets_checkbox = Checkbox("All datasets", checked=True)

-eval_params = widgets.Editor(
+eval_params = Editor(
    initial_text=None,
    language_mode="yaml",
    height_lines=16,
)
-eval_params_card = widgets.Card(
+eval_params_card = Card(
    title="Evaluation parameters",
    content=eval_params,
    collapsable=True,
)
eval_params_card.collapse()


-eval_button = widgets.Button("Evaluate")
+eval_button = Button("Evaluate")
eval_button.disable()

-eval_pbar = widgets.SlyTqdm()
-sec_eval_pbar = widgets.Progress("")
+eval_pbar = SlyTqdm()
+sec_eval_pbar = Progress("")

-report_model_benchmark = widgets.ReportThumbnail()
+report_model_benchmark = ReportThumbnail()
report_model_benchmark.hide()

-evaluation_container = widgets.Container(
+evaluation_container = Container(
    [
        sel_project,
        all_datasets_checkbox,
@@ -83,7 +83,7 @@ def run_evaluation(
    project_id: Optional[int] = None,
    params: Optional[Union[str, Dict]] = None,
):
-    work_dir = g.STORAGE_DIR + "/benchmark_" + rand_str(6)
+    work_dir = g.STORAGE_DIR + "/benchmark_" + sly.rand_str(6)

    if session_id is not None:
        g.session_id = session_id
@@ -102,7 +102,6 @@
    if len(dataset_ids) == 0:
        raise ValueError("No datasets selected")

-
    # ==================== Workflow input ====================
    w.workflow_input(g.api, project, g.session_id)
    # =======================================================
@@ -116,38 +115,52 @@
    eval_pbar.show()
    sec_eval_pbar.show()

-    evaluation_params = eval_params.get_value() or params
-    if isinstance(evaluation_params, str):
-        evaluation_params = yaml.safe_load(evaluation_params)
+    params = eval_params.get_value() or params
+    if isinstance(params, str):
+        params = yaml.safe_load(params)

-    if task_type == TaskType.OBJECT_DETECTION:
-        if evaluation_params is None:
-            evaluation_params = ObjectDetectionEvaluator.load_yaml_evaluation_params()
-            evaluation_params = yaml.safe_load(evaluation_params)
-        bm = ObjectDetectionBenchmark(
+    if task_type == sly.nn.TaskType.OBJECT_DETECTION:
+        if params is None:
+            params = sly.nn.benchmark.ObjectDetectionEvaluator.load_yaml_evaluation_params()
+            params = yaml.safe_load(params)
+        bm = sly.nn.benchmark.ObjectDetectionBenchmark(
            g.api,
            project.id,
            gt_dataset_ids=dataset_ids,
            output_dir=work_dir,
            progress=eval_pbar,
            progress_secondary=sec_eval_pbar,
            classes_whitelist=g.selected_classes,
+            evaluation_params=params,
+        )
+    elif task_type == sly.nn.TaskType.INSTANCE_SEGMENTATION:
+        if params is None:
+            params = sly.nn.benchmark.InstanceSegmentationEvaluator.load_yaml_evaluation_params()
+            params = yaml.safe_load(params)
+        bm = sly.nn.benchmark.InstanceSegmentationBenchmark(
+            g.api,
+            project.id,
+            gt_dataset_ids=dataset_ids,
+            output_dir=work_dir,
+            progress=eval_pbar,
+            progress_secondary=sec_eval_pbar,
+            classes_whitelist=g.selected_classes,
-            evaluation_params=evaluation_params,
+            evaluation_params=params,
        )
-    elif task_type == TaskType.INSTANCE_SEGMENTATION:
-        if evaluation_params is None:
-            evaluation_params = InstanceSegmentationEvaluator.load_yaml_evaluation_params()
-            evaluation_params = yaml.safe_load(evaluation_params)
-        bm = InstanceSegmentationBenchmark(
+    elif task_type == sly.nn.TaskType.SEMANTIC_SEGMENTATION:
+        params = sly.nn.benchmark.SemanticSegmentationEvaluator.load_yaml_evaluation_params()
+        bm = sly.nn.benchmark.SemanticSegmentationBenchmark(
            g.api,
            project.id,
            gt_dataset_ids=dataset_ids,
            output_dir=work_dir,
            progress=eval_pbar,
            progress_secondary=sec_eval_pbar,
            classes_whitelist=g.selected_classes,
-            evaluation_params=evaluation_params,
+            evaluation_params=params,
        )

+    bm.evaluator_app_info = g.api.task.get_info_by_id(g.task_id)
+    sly.logger.info(f"{g.session_id = }")

    task_info = g.api.task.get_info_by_id(g.session_id)
@@ -201,9 +214,9 @@ def run_evaluation(
f"Predictions project: "
f" name {bm.dt_project_info.name}, "
f" workspace_id {bm.dt_project_info.workspace_id}. "
f"Differences project: "
f" name {bm.diff_project_info.name}, "
f" workspace_id {bm.diff_project_info.workspace_id}"
# f"Differences project: "
# f" name {bm.diff_project_info.name}, "
# f" workspace_id {bm.diff_project_info.workspace_id}"
)

eval_button.loading = False
@@ -233,10 +246,12 @@ def update_eval_params():
    if g.session is None:
        g.session = SessionJSON(g.api, g.session_id)
    task_type = g.session.get_deploy_info()["task_type"]
-    if task_type == TaskType.OBJECT_DETECTION:
-        params = ObjectDetectionEvaluator.load_yaml_evaluation_params()
-    elif task_type == TaskType.INSTANCE_SEGMENTATION:
-        params = InstanceSegmentationEvaluator.load_yaml_evaluation_params()
+    if task_type == sly.nn.TaskType.OBJECT_DETECTION:
+        params = sly.nn.benchmark.ObjectDetectionEvaluator.load_yaml_evaluation_params()
+    elif task_type == sly.nn.TaskType.INSTANCE_SEGMENTATION:
+        params = sly.nn.benchmark.InstanceSegmentationEvaluator.load_yaml_evaluation_params()
+    elif task_type == sly.nn.TaskType.SEMANTIC_SEGMENTATION:
+        params = "# Semantic Segmentation evaluation parameters are not available yet."
    eval_params.set_text(params, language_mode="yaml")
    eval_params_card.uncollapse()

@@ -269,9 +284,10 @@ def handle_sel_app_session(session_id: Optional[int]):
    if g.session_id:
        update_eval_params()

+
@all_datasets_checkbox.value_changed
def handle_all_datasets_checkbox(checked: bool):
    if checked:
        sel_dataset.hide()
    else:
-        sel_dataset.show()
\ No newline at end of file
+        sel_dataset.show()
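Taken together, the evaluation flow now dispatches on three task types through the sly.nn.benchmark namespace. A condensed sketch of the new semantic-segmentation path; the IDs and output directory are placeholders, optional progress arguments are omitted, and, as in the diff, the YAML string returned by load_yaml_evaluation_params() is passed straight to the benchmark:

import supervisely as sly

api = sly.Api()                   # assumes SERVER_ADDRESS / API_TOKEN are set in the env
project_id = 123                  # placeholder: ground-truth project id
dataset_ids = [456]               # placeholder: datasets to evaluate on
work_dir = "/tmp/benchmark_demo"  # placeholder output directory

# Default evaluation parameters ship with the evaluator as a YAML string.
params = sly.nn.benchmark.SemanticSegmentationEvaluator.load_yaml_evaluation_params()

bm = sly.nn.benchmark.SemanticSegmentationBenchmark(
    api,
    project_id,
    gt_dataset_ids=dataset_ids,
    output_dir=work_dir,
    evaluation_params=params,
)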
4 changes: 2 additions & 2 deletions src/workflow.py
@@ -79,7 +79,7 @@ def workflow_output(

# Add output model benchmark report to the workflow
mb_relation_settings = sly.WorkflowSettings(
title="Model Evaluation",
title="Model Benchmark",
icon="assignment",
icon_color="#dcb0ff",
icon_bg_color="#faebff",
@@ -97,7 +97,7 @@
    try:
        # Add output model benchmark report to the workflow
        comparison_relation_settings = sly.WorkflowSettings(
-            title="Model Evaluation",
+            title="Model Comparison",
            icon="assignment",
            icon_color="#ffc084",
            icon_bg_color="#fff2e6",
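The renamed titles separate the two workflow cards that previously both read "Model Evaluation". For reference, a side-by-side sketch of the two settings objects, with fields copied from the diff; how they are attached to workflow nodes is omitted here:

import supervisely as sly

# Card for the benchmark report node.
mb_relation_settings = sly.WorkflowSettings(
    title="Model Benchmark",
    icon="assignment",
    icon_color="#dcb0ff",
    icon_bg_color="#faebff",
)

# Card for the comparison report node.
comparison_relation_settings = sly.WorkflowSettings(
    title="Model Comparison",
    icon="assignment",
    icon_color="#ffc084",
    icon_bg_color="#fff2e6",
)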
