Upgrade SDK version to 6.73.239. Bug fixes. Fixed compatibility with ONNX and TensorRT runtimes. #76

Merged
12 commits merged on Nov 28, 2024
10 changes: 8 additions & 2 deletions docker/Dockerfile
@@ -3,20 +3,26 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN apt update && apt install python3-pip -y
RUN apt-get install -y git

RUN pip3 install nvidia-cudnn-cu11==8.9.4.25 --no-cache-dir
RUN pip3 install --pre --extra-index-url https://pypi.nvidia.com/ tensorrt==9.0.1.post11.dev4 --no-cache-dir

RUN pip3 install networkx==2.8.8
RUN pip3 install torch==2.3.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu121
RUN pip3 install protobuf==3.20.1
RUN pip3 install onnx==1.12.0

RUN apt-get install ffmpeg libgeos-dev libsm6 libxext6 libexiv2-dev libxrender-dev libboost-all-dev -y
RUN apt update && apt-get install ffmpeg libgeos-dev libsm6 libxext6 libexiv2-dev libxrender-dev libboost-all-dev -y

RUN pip3 install requests==2.28.2 urllib3==1.26.15
RUN pip3 install packaging==21.3
RUN pip3 install dill==0.3.8
RUN pip3 install ruamel.yaml==0.17.21

RUN pip3 install ultralytics==8.3.15
RUN python3 -m pip install supervisely[tracking,model-benchmark]==6.73.232
RUN pip3 install onnxruntime-gpu==1.18.0
RUN pip3 install dill==0.2.8.2 tf2onnx==1.16.1 skl2onnx==1.17.0
RUN pip3 uninstall -y nvidia-cudnn-cu11
RUN python3 -m pip install supervisely[tracking,model-benchmark]==6.73.239
RUN python3 -c "from supervisely.nn.tracker import *"

LABEL python_sdk_version=6.73.232
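
A minimal sanity check (a sketch, not part of this PR) for the runtime pins above: it assumes it runs inside the built image on a GPU host, and only confirms that the TensorRT and ONNX Runtime Python packages import and that a CUDA execution provider is registered.

```python
# Sketch: verify the ONNX Runtime / TensorRT packages pinned in the Dockerfile.
# Assumes execution inside the built image on a machine with an NVIDIA GPU.
import onnxruntime as ort
import tensorrt as trt

print("TensorRT version:", trt.__version__)
print("ONNX Runtime version:", ort.__version__)

providers = ort.get_available_providers()
print("Available providers:", providers)
# On a correctly configured GPU image this list should include
# "CUDAExecutionProvider"; "CPUExecutionProvider" is always present.
assert "CPUExecutionProvider" in providers
```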
1 change: 1 addition & 0 deletions export_weights/config.json
@@ -1,5 +1,6 @@
{
"name": "Export YOLOv8 weights",
"type": "app",
"description": "to TorchScript and ONNX formats",
"categories": [
"neural network",
2 changes: 1 addition & 1 deletion serve/config.json
@@ -21,7 +21,7 @@
"deployed_nn"
],
"community_agent": false,
"docker_image": "supervisely/yolov8:1.0.35",
"docker_image": "supervisely/yolov8:1.0.38",
"instance_version": "6.12.5",
"entrypoint": "python3 -m uvicorn src.main:m.app --app-dir ./serve --host 0.0.0.0 --port 8000 --ws websockets",
"port": 8000,
14 changes: 12 additions & 2 deletions serve/src/yolov8.py
@@ -578,7 +578,14 @@ def _load_runtime(self, weights_path: str, format: str, **kwargs):
self._dump_yaml_checkpoint_info(model, os.path.dirname(weights_path))
else:
exported_weights_path = weights_path
model = YOLO(exported_weights_path)

task_type_map = {
"object detection": "detect",
"instance segmentation": "segment",
"pose estimation": "pose",
}

model = YOLO(exported_weights_path, task=task_type_map[self.task_type])
return model

def _load_onnx(self, weights_path: str):
@@ -684,7 +691,10 @@ def parse_model_name(checkpoint_name: str):

def get_arch_from_model_name(model_name: str):
# yolov8n-det
p = r"yolov(\d+)"
if "11" in model_name:
p = r"yolo(\d+)"
else:
p = r"yolov(\d+)"
match = re.match(p, model_name.lower())
if match:
return f"YOLOv{match.group(1)}"
2 changes: 1 addition & 1 deletion train/config.json
@@ -11,7 +11,7 @@
"train"
],
"description": "Dashboard to configure, start and monitor YOLOv8 | v9 | v10 | v11 training",
"docker_image": "supervisely/yolov8:1.0.35",
"docker_image": "supervisely/yolov8:1.0.38",
"instance_version": "6.12.5",
"entrypoint": "python3 -m uvicorn src.main:app --app-dir ./train --host 0.0.0.0 --port 8000 --ws websockets",
"task_location": "workspace_tasks",
2 changes: 1 addition & 1 deletion train/dev_requirements.txt
@@ -1 +1 @@
supervisely[model-benchmark]==6.73.232
supervisely[model-benchmark]==6.73.239
13 changes: 3 additions & 10 deletions train/src/main.py
@@ -2217,9 +2217,6 @@ def get_image_infos_by_split(split: list):
sly.logger.info(
f"Predictions project name: {bm.dt_project_info.name}. Workspace_id: {bm.dt_project_info.workspace_id}"
)
sly.logger.info(
f"Differences project name: {bm.diff_project_info.name}. Workspace_id: {bm.diff_project_info.workspace_id}"
)
except Exception as e:
sly.logger.error(f"Model benchmark failed. {repr(e)}", exc_info=True)
creating_report_f.hide()
@@ -2228,8 +2225,6 @@ def get_image_infos_by_split(split: list):
try:
if bm.dt_project_info:
api.project.remove(bm.dt_project_info.id)
if bm.diff_project_info:
api.project.remove(bm.diff_project_info.id)
except Exception as e2:
pass
# ----------------------------------------------- - ---------------------------------------------- #
@@ -3171,6 +3166,8 @@ def get_image_infos_by_split(split: list):
dataset_name,
image_names,
) in image_names_per_dataset.items():
if "/" in dataset_name:
dataset_name = dataset_name.split("/")[-1]
ds_info = ds_infos_dict[dataset_name]
image_infos.extend(
api.image.get_list(
@@ -3236,6 +3233,7 @@

# 4. Evaluate
bm._evaluate(gt_project_path, dt_project_path)
bm._dump_eval_inference_info(bm._eval_inference_info)

# 5. Upload evaluation results
eval_res_dir = get_eval_results_dir_name(
@@ -3266,9 +3264,6 @@
sly.logger.info(
f"Predictions project name: {bm.dt_project_info.name}. Workspace_id: {bm.dt_project_info.workspace_id}"
)
sly.logger.info(
f"Differences project name: {bm.diff_project_info.name}. Workspace_id: {bm.diff_project_info.workspace_id}"
)
except Exception as e:
sly.logger.error(f"Model benchmark failed. {repr(e)}", exc_info=True)
creating_report_f.hide()
@@ -3277,8 +3272,6 @@ def get_image_infos_by_split(split: list):
try:
if bm.dt_project_info:
api.project.remove(bm.dt_project_info.id)
if bm.diff_project_info:
api.project.remove(bm.diff_project_info.id)
except Exception as e2:
pass
# ----------------------------------------------- - ---------------------------------------------- #
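
A minimal sketch of the nested-dataset handling added in this file; resolve_dataset_info is a hypothetical helper name, and ds_infos_dict is assumed to be keyed by leaf dataset names as in the surrounding code.

```python
# Hypothetical helper mirroring the new "/"-handling: when a dataset name
# arrives as "parent/child", only the leaf name is used for the lookup.
def resolve_dataset_info(dataset_name: str, ds_infos_dict: dict):
    if "/" in dataset_name:
        dataset_name = dataset_name.split("/")[-1]
    return ds_infos_dict[dataset_name]

# Usage with made-up dataset infos:
ds_infos_dict = {"train": {"id": 101}, "val": {"id": 102}}
print(resolve_dataset_info("my_project/train", ds_infos_dict))  # {'id': 101}
```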