From 5fd1cece04a5d66159759b450d7e55bb52b47970 Mon Sep 17 00:00:00 2001 From: almaz Date: Wed, 11 Dec 2024 14:57:48 +0100 Subject: [PATCH 1/6] test sdk branch --- requirements.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 requirements.txt diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..ea4af99 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +git+https://github.com/supervisely/supervisely.git@semsegm-bm-comparison From d8448aa7d73d98598a8eea038cbecf5d719b7a7e Mon Sep 17 00:00:00 2001 From: almaz Date: Wed, 11 Dec 2024 14:58:11 +0100 Subject: [PATCH 2/6] refactor comparison --- src/ui/compare.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/ui/compare.py b/src/ui/compare.py index fb69380..f22a99c 100644 --- a/src/ui/compare.py +++ b/src/ui/compare.py @@ -63,19 +63,19 @@ def run_compare(eval_dirs: List[str] = None): res_dir = f.get_res_dir(g.eval_dirs) res_dir = comp.upload_results(g.team_id, remote_dir=res_dir, progress=comp_pbar) - report = g.api.file.get_info_by_path(g.team_id, comp.get_report_link()) - g.api.task.set_output_report(g.task_id, report.id, report.name) + g.api.task.set_output_report(g.task_id, comp.lnk.id, comp.lnk.name, "Click to open the report") - models_comparison_report.set(report) + models_comparison_report.set(comp.report) models_comparison_report.show() # ==================== Workflow output ==================== - w.workflow_output(g.api, model_comparison_report=report) + w.workflow_output(g.api, model_comparison_report=comp.report) # ======================================================= comp_pbar.hide() compare_button.loading = False sly.logger.info(f"Model comparison report uploaded to: {res_dir}") + sly.logger.info(f"Report link: {comp.get_report_link()}") return res_dir From 5dfe28ef1e89696eb442af9cfb1122e2d15c71b0 Mon Sep 17 00:00:00 2001 From: almaz Date: Wed, 11 Dec 2024 14:58:18 +0100 Subject: [PATCH 3/6] refactor evaluation --- src/ui/evaluation.py | 126 +++++++++++++++++++++---------------------- 1 file changed, 62 insertions(+), 64 deletions(-) diff --git a/src/ui/evaluation.py b/src/ui/evaluation.py index 4e265d9..0390eb8 100644 --- a/src/ui/evaluation.py +++ b/src/ui/evaluation.py @@ -1,4 +1,4 @@ -from typing import Dict, Optional, Union +from typing import Dict, Optional, Tuple, Union import yaml @@ -20,6 +20,14 @@ SlyTqdm, Text, ) +from supervisely.nn.benchmark import ( + InstanceSegmentationBenchmark, + InstanceSegmentationEvaluator, + ObjectDetectionBenchmark, + ObjectDetectionEvaluator, + SemanticSegmentationBenchmark, + SemanticSegmentationEvaluator, +) from supervisely.nn.inference.session import SessionJSON no_classes_label = Text( @@ -78,6 +86,27 @@ ] ) +benchmark_cls_type = Union[ + ObjectDetectionBenchmark, InstanceSegmentationBenchmark, SemanticSegmentationBenchmark +] + +evaluator_cls_type = Union[ + ObjectDetectionEvaluator, InstanceSegmentationEvaluator, SemanticSegmentationEvaluator +] + + +def get_benchmark_and_evaluator_classes( + task_type: sly.nn.TaskType, +) -> Tuple[benchmark_cls_type, evaluator_cls_type]: + if task_type == sly.nn.TaskType.OBJECT_DETECTION: + return ObjectDetectionBenchmark, ObjectDetectionEvaluator + elif task_type == sly.nn.TaskType.INSTANCE_SEGMENTATION: + return (InstanceSegmentationBenchmark, InstanceSegmentationEvaluator) + elif task_type == sly.nn.TaskType.SEMANTIC_SEGMENTATION: + return (SemanticSegmentationBenchmark, SemanticSegmentationEvaluator) + else: + raise ValueError(f"Unknown task type: {task_type}") + 
@f.with_clean_up_progress(eval_pbar) def run_evaluation( @@ -87,10 +116,8 @@ def run_evaluation( ): work_dir = g.STORAGE_DIR + "/benchmark_" + sly.rand_str(6) - if session_id is not None: - g.session_id = session_id - if project_id is not None: - g.project_id = project_id + g.session_id = session_id or g.session_id + g.project_id = project_id or g.project_id project = g.api.project.get_info_by_id(g.project_id) if g.session is None: @@ -119,48 +146,23 @@ def run_evaluation( params = eval_params.get_value() or params if isinstance(params, str): + sly.Annotation.filter_labels_by_classes params = yaml.safe_load(params) - if task_type == sly.nn.TaskType.OBJECT_DETECTION: - if params is None: - params = sly.nn.benchmark.ObjectDetectionEvaluator.load_yaml_evaluation_params() - params = yaml.safe_load(params) - bm = sly.nn.benchmark.ObjectDetectionBenchmark( - g.api, - project.id, - gt_dataset_ids=dataset_ids, - output_dir=work_dir, - progress=eval_pbar, - progress_secondary=sec_eval_pbar, - classes_whitelist=g.selected_classes, - evaluation_params=params, - ) - elif task_type == sly.nn.TaskType.INSTANCE_SEGMENTATION: - if params is None: - params = sly.nn.benchmark.InstanceSegmentationEvaluator.load_yaml_evaluation_params() - params = yaml.safe_load(params) - bm = sly.nn.benchmark.InstanceSegmentationBenchmark( - g.api, - project.id, - gt_dataset_ids=dataset_ids, - output_dir=work_dir, - progress=eval_pbar, - progress_secondary=sec_eval_pbar, - classes_whitelist=g.selected_classes, - evaluation_params=params, - ) - elif task_type == sly.nn.TaskType.SEMANTIC_SEGMENTATION: - params = sly.nn.benchmark.SemanticSegmentationEvaluator.load_yaml_evaluation_params() - bm = sly.nn.benchmark.SemanticSegmentationBenchmark( - g.api, - project.id, - gt_dataset_ids=dataset_ids, - output_dir=work_dir, - progress=eval_pbar, - progress_secondary=sec_eval_pbar, - classes_whitelist=g.selected_classes, - evaluation_params=params, - ) + bm_cls, evaluator_cls = get_benchmark_and_evaluator_classes(task_type) + if params is None: + params = evaluator_cls.load_yaml_evaluation_params() + params = yaml.safe_load(params) + bm: benchmark_cls_type = bm_cls( + g.api, + project.id, + gt_dataset_ids=dataset_ids, + output_dir=work_dir, + progress=eval_pbar, + progress_secondary=sec_eval_pbar, + classes_whitelist=g.selected_classes, + evaluation_params=params, + ) bm.evaluator_app_info = g.api.task.get_info_by_id(g.task_id) sly.logger.info(f"{g.session_id = }") @@ -197,30 +199,21 @@ def run_evaluation( bm.visualize() bm.upload_eval_results(res_dir + "/evaluation/") - remote_dir = bm.upload_visualizations(res_dir + "/visualizations/") + bm.upload_visualizations(res_dir + "/visualizations/") - report = bm.upload_report_link(remote_dir) - g.api.task.set_output_report(g.task_id, report.id, report.name) - - template_vis_file = g.api.file.get_info_by_path( - sly.env.team_id(), res_dir + "/visualizations/template.vue" - ) - report_model_benchmark.set(template_vis_file) + g.api.task.set_output_report(g.task_id, bm.lnk.id, bm.lnk.name, "Click to open the report") + report_model_benchmark.set(bm.report) report_model_benchmark.show() eval_pbar.hide() # ==================== Workflow output ==================== - w.workflow_output(g.api, res_dir, template_vis_file) + w.workflow_output(g.api, res_dir, bm.report) # ======================================================= sly.logger.info( - f"Predictions project: " - f" name {bm.dt_project_info.name}, " - f" workspace_id {bm.dt_project_info.workspace_id}. 
" - # f"Differences project: " - # f" name {bm.diff_project_info.name}, " - # f" workspace_id {bm.diff_project_info.workspace_id}" + f"Predictions project {bm.dt_project_info.name}, workspace ID: {bm.dt_project_info.workspace_id}." ) + sly.logger.info(f"Report link: {bm.get_report_link()}") eval_button.loading = False @@ -250,13 +243,18 @@ def update_eval_params(): g.session = SessionJSON(g.api, g.session_id) task_type = g.session.get_deploy_info()["task_type"] if task_type == sly.nn.TaskType.OBJECT_DETECTION: - params = sly.nn.benchmark.ObjectDetectionEvaluator.load_yaml_evaluation_params() + params = ObjectDetectionEvaluator.load_yaml_evaluation_params() elif task_type == sly.nn.TaskType.INSTANCE_SEGMENTATION: - params = sly.nn.benchmark.InstanceSegmentationEvaluator.load_yaml_evaluation_params() + params = InstanceSegmentationEvaluator.load_yaml_evaluation_params() elif task_type == sly.nn.TaskType.SEMANTIC_SEGMENTATION: - params = "# Semantic Segmentation evaluation parameters are not available yet." + params = "" eval_params.set_text(params, language_mode="yaml") - eval_params_card.uncollapse() + + if task_type == sly.nn.TaskType.SEMANTIC_SEGMENTATION: + eval_params_card.hide() + else: + eval_params_card.show() + eval_params_card.uncollapse() def handle_selectors(active: bool): From d23d1199e83504ab83cf186505748830959a03a9 Mon Sep 17 00:00:00 2001 From: almaz Date: Mon, 16 Dec 2024 15:25:41 +0100 Subject: [PATCH 4/6] add dataset_ids argument (when running from script) --- local.env | 2 +- src/main.py | 5 ++++- src/ui/evaluation.py | 17 +++++++++-------- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/local.env b/local.env index abda3fa..66cb857 100644 --- a/local.env +++ b/local.env @@ -3,5 +3,5 @@ WORKSPACE_ID = 680 # PROJECT_ID = 41021 SLY_APP_DATA_DIR = "APP_DATA" -TASK_ID = 68257 +TASK_ID = 68088 # modal.state.sessionId=66693 \ No newline at end of file diff --git a/src/main.py b/src/main.py index a5b45fc..0d3f5cb 100644 --- a/src/main.py +++ b/src/main.py @@ -41,7 +41,10 @@ async def evaluate(request: Request): req = await request.json() try: state = req["state"] - return {"data": run_evaluation(state["session_id"], state["project_id"])} + session_id = state["session_id"] + project_id = state["project_id"] + dataset_ids = state.get("dataset_ids", None) + return {"data": run_evaluation(session_id, project_id, dataset_ids=dataset_ids)} except Exception as e: sly.logger.error(f"Error during model evaluation: {e}") return {"error": str(e)} diff --git a/src/ui/evaluation.py b/src/ui/evaluation.py index 0390eb8..986ae0e 100644 --- a/src/ui/evaluation.py +++ b/src/ui/evaluation.py @@ -113,6 +113,7 @@ def run_evaluation( session_id: Optional[int] = None, project_id: Optional[int] = None, params: Optional[Union[str, Dict]] = None, + dataset_ids: Optional[Tuple[int]] = None, ): work_dir = g.STORAGE_DIR + "/benchmark_" + sly.rand_str(6) @@ -124,12 +125,13 @@ def run_evaluation( g.session = SessionJSON(g.api, g.session_id) task_type = g.session.get_deploy_info()["task_type"] - if all_datasets_checkbox.is_checked(): - dataset_ids = None - else: - dataset_ids = sel_dataset.get_selected_ids() - if len(dataset_ids) == 0: - raise ValueError("No datasets selected") + if dataset_ids is None: + if all_datasets_checkbox.is_checked(): + dataset_ids = None + else: + dataset_ids = sel_dataset.get_selected_ids() + if len(dataset_ids) == 0: + raise ValueError("No datasets selected") # ==================== Workflow input ==================== w.workflow_input(g.api, project, g.session_id) @@ 
-239,8 +241,7 @@ def set_selected_classes_and_show_info(): def update_eval_params(): - if g.session is None: - g.session = SessionJSON(g.api, g.session_id) + g.session = SessionJSON(g.api, g.session_id) task_type = g.session.get_deploy_info()["task_type"] if task_type == sly.nn.TaskType.OBJECT_DETECTION: params = ObjectDetectionEvaluator.load_yaml_evaluation_params() From ec9e3cfc1cf054dda0190c8551f995825368c99e Mon Sep 17 00:00:00 2001 From: almaz Date: Mon, 16 Dec 2024 15:26:43 +0100 Subject: [PATCH 5/6] update dockerfile and requirements --- dev_requirements.txt | 2 +- docker/Dockerfile | 4 ++-- requirements.txt | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) delete mode 100644 requirements.txt diff --git a/dev_requirements.txt b/dev_requirements.txt index ad88b26..5d79d23 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,5 +1,5 @@ # git+https://github.com/supervisely/supervisely.git@model-benchmark -supervisely[model-benchmark]==6.73.239 +supervisely[model-benchmark]==6.73.255 # torch==1.13.0 # torchvision==0.14.0 diff --git a/docker/Dockerfile b/docker/Dockerfile index 353b10a..b74ba71 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,5 +1,5 @@ FROM supervisely/base-py-sdk:6.73.208 -RUN python3 -m pip install supervisely[model-benchmark]==6.73.239 +RUN python3 -m pip install supervisely[model-benchmark]==6.73.255 -LABEL python_sdk_version=6.73.239 \ No newline at end of file +LABEL python_sdk_version=6.73.255 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index ea4af99..0000000 --- a/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -git+https://github.com/supervisely/supervisely.git@semsegm-bm-comparison From 030968f535bb28c4bae299292ad05cc14e41a792 Mon Sep 17 00:00:00 2001 From: almaz Date: Mon, 16 Dec 2024 15:50:07 +0100 Subject: [PATCH 6/6] Update docker image and instance version in config.json --- config.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config.json b/config.json index a21d141..f7a78b8 100644 --- a/config.json +++ b/config.json @@ -11,6 +11,6 @@ "task_location": "workspace_tasks", "entrypoint": "python -m uvicorn src.main:app --host 0.0.0.0 --port 8000", "port": 8000, - "docker_image": "supervisely/model-benchmark:1.0.16", - "instance_version": "6.12.5" + "docker_image": "supervisely/model-benchmark:1.0.17", + "instance_version": "6.12.12" }
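
Note on PATCH 4/6: with the dataset_ids argument wired through src/main.py, an evaluation can be triggered from a script instead of the UI. Below is a minimal sketch of such a call; the /evaluate route path and the localhost address are assumptions (the route decorator is not visible in the diff, and config.json only fixes port 8000), while the "state" keys mirror what the handler reads from req["state"], and the example session/project IDs are taken from local.env purely for illustration.

import requests

# Assumption: the evaluate() handler in src/main.py is mounted at /evaluate;
# only the handler body appears in the diff, so the route path is illustrative.
APP_URL = "http://localhost:8000/evaluate"  # port 8000 per the entrypoint in config.json

payload = {
    "state": {
        "session_id": 66693,        # deployed model session (example value from local.env)
        "project_id": 41021,        # ground-truth project (example value from local.env)
        "dataset_ids": [101, 102],  # optional; omit to fall back to the datasets selected in the UI
    }
}

resp = requests.post(APP_URL, json=payload, timeout=600)
resp.raise_for_status()
print(resp.json())  # {"data": <remote results dir>} on success, {"error": "..."} otherwise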