fix: ml model path #890

Merged 8 commits on Sep 29, 2023
@@ -10,7 +10,7 @@
<arg name="use_validator" default="true" description="use obstacle_pointcloud based validator"/>
<arg name="lidar_detection_score_threshold" default="0.35"/>
<arg name="centerpoint_model_name" default="centerpoint_tiny"/>
<arg name="centerpoint_model_path" default="$(find-pkg-share lidar_centerpoint)/data"/>
<arg name="centerpoint_model_path" default="$(var data_path)/lidar_centerpoint"/>
<arg name="lidar_model_param_path" default="$(find-pkg-share lidar_centerpoint)/config"/>
<arg name="objects_validation_method" default="obstacle_pointcloud"/>
<arg name="objects_filter_method" default="lanelet_filter"/>
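This change swaps a package-relative default (`$(find-pkg-share ...)/data`) for one rooted in a shared artifact directory. Note that `$(var data_path)` only resolves when a `data_path` argument is declared in, or passed down by, the including launch file; a minimal sketch of the pattern (the wrapper file below is illustrative, not part of this diff):

```xml
<!-- Illustrative wrapper: declare data_path once, then derive per-package paths from it -->
<launch>
  <arg name="data_path" default="$(env HOME)/autoware_data"/>
  <arg name="centerpoint_model_path" default="$(var data_path)/lidar_centerpoint"/>
</launch>
```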
7 changes: 4 additions & 3 deletions launch/tier4_perception_launch/launch/perception.launch.xml
@@ -25,11 +25,12 @@

<!-- CenterPoint model parameters -->
<arg name="centerpoint_model_name" default="centerpoint_tiny" description="options: `centerpoint` or `centerpoint_tiny`"/>
<arg name="centerpoint_model_path" default="$(find-pkg-share lidar_centerpoint)/data"/>
<arg name="centerpoint_model_path" default="$(var data_path)/lidar_centerpoint"/>

<!-- Common parameters -->
<arg name="input/pointcloud" default="/sensing/lidar/concatenated/pointcloud" description="The topic will be used in the detection module"/>
<arg name="mode" default="camera_lidar_fusion" description="options: `camera_lidar_radar_fusion`, `camera_lidar_fusion`, `lidar_radar_fusion`, `lidar` or `radar`"/>
<arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
<arg name="lidar_detection_model" default="centerpoint" description="options: `centerpoint`, `apollo`, `pointpainting`, `clustering`"/>
<arg name="image_raw0" default="/sensing/camera/camera0/image_rect_color" description="image raw topic name"/>
<arg name="camera_info0" default="/sensing/camera/camera0/camera_info" description="camera info topic name"/>
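Since `data_path` defaults to `$(env HOME)/autoware_data`, artifacts kept elsewhere can be used by overriding the argument at launch time. A hypothetical invocation (the target path is an example; other required arguments are omitted):

```bash
ros2 launch tier4_perception_launch perception.launch.xml \
  data_path:=/opt/autoware_data
```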
@@ -78,11 +79,11 @@
<arg name="traffic_light_image_number" default="1" description="choose traffic light image raw number (1-2)"/>
<arg
name="traffic_light_fine_detector_model_path"
default="$(find-pkg-share traffic_light_fine_detector)/data"
default="$(var data_path)/traffic_light_fine_detector"
description="options: `tlr_yolox_s_batch_**`. The batch number must be one of 1, 4, or 6"
/>
<arg name="traffic_light_fine_detector_model_name" default="tlr_yolox_s_batch_6" description="options: `tlr_yolox_s_batch_**`. The batch number must be one of 1, 4, or 6"/>
<arg name="traffic_light_classifier_model_path" default="$(find-pkg-share traffic_light_classifier)/data" description="classifier onnx model path"/>
<arg name="traffic_light_classifier_model_path" default="$(var data_path)/traffic_light_classifier" description="classifier onnx model path"/>
<arg
name="traffic_light_classifier_model_name"
default="traffic_light_classifier_mobilenetv2_batch_6"
@@ -18,8 +18,9 @@
<arg name="input/camera_info7" default="/camera_info7"/>
<arg name="input/pointcloud" default="/sensing/lidar/top/rectified/pointcloud"/>
<arg name="output/objects" default="objects"/>
<arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
<arg name="model_name" default="pointpainting" description="options: `pointpainting`"/>
<arg name="model_path" default="$(find-pkg-share image_projection_based_fusion)/data"/>
<arg name="model_path" default="$(var data_path)/image_projection_based_fusion"/>
<arg name="model_param_path" default="$(find-pkg-share image_projection_based_fusion)/config/$(var model_name).param.yaml"/>
<arg name="class_remapper_param_path" default="$(find-pkg-share lidar_centerpoint)/config/detection_class_remapper.param.yaml"/>
<arg name="sync_param_path" default="$(find-pkg-share image_projection_based_fusion)/config/roi_sync.param.yaml"/>
@@ -5,6 +5,7 @@
<arg name="score_threshold" default="0.35"/>
<arg name="rviz_config1" default="$(var rviz_path)/centerpoint_tiny.rviz" description="rviz config"/>
<arg name="rviz_config2" default="$(var rviz_path)/centerpoint.rviz" description="rviz config"/>
<arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>

<set_parameter name="use_sim_time" value="true"/>
<group>
@@ -21,7 +22,7 @@
<arg name="input/pointcloud" value="$(var input/pointcloud)"/>
<arg name="score_threshold" value="$(var score_threshold)"/>
<arg name="model_name" value="centerpoint"/>
<arg name="model_path" value="$(find-pkg-share lidar_centerpoint)/data"/>
<arg name="model_path" value="$(var data_path)/lidar_centerpoint"/>
<arg name="model_param_path" value="$(find-pkg-share lidar_centerpoint)/config/$(var model_name).param.yaml"/>
</include>
</group>
@@ -35,7 +36,7 @@
<arg name="input/pointcloud" value="$(var input/pointcloud)"/>
<arg name="score_threshold" value="$(var score_threshold)"/>
<arg name="model_name" value="centerpoint_tiny"/>
<arg name="model_path" value="$(find-pkg-share lidar_centerpoint)/data"/>
<arg name="model_path" value="$(var data_path)/lidar_centerpoint"/>
<arg name="model_param_path" value="$(find-pkg-share lidar_centerpoint)/config/$(var model_name).param.yaml"/>
</include>
</group>
@@ -2,8 +2,9 @@
<launch>
<arg name="input/pointcloud" default="/sensing/lidar/pointcloud"/>
<arg name="output/objects" default="objects"/>
<arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
<arg name="model_name" default="centerpoint_tiny" description="options: `centerpoint` or `centerpoint_tiny`"/>
<arg name="model_path" default="$(find-pkg-share lidar_centerpoint)/data"/>
<arg name="model_path" default="$(var data_path)/lidar_centerpoint"/>
<arg name="model_param_path" default="$(find-pkg-share lidar_centerpoint)/config/$(var model_name).param.yaml"/>
<arg name="class_remapper_param_path" default="$(find-pkg-share lidar_centerpoint)/config/detection_class_remapper.param.yaml"/>
<arg name="score_threshold" default="0.35"/>
@@ -1,7 +1,8 @@
<?xml version="1.0"?>
<launch>
<arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
<arg name="model_name" default="centerpoint_tiny" description="options: `centerpoint` or `centerpoint_tiny`"/>
<arg name="model_path" default="$(find-pkg-share lidar_centerpoint)/data"/>
<arg name="model_path" default="$(var data_path)/lidar_centerpoint"/>
<arg name="model_param_path" default="$(find-pkg-share lidar_centerpoint)/config/$(var model_name).param.yaml"/>
<arg name="class_remapper_param_path" default="$(find-pkg-share lidar_centerpoint)/config/detection_class_remapper.param.yaml"/>
<arg name="score_threshold" default="0.35"/>
3 changes: 2 additions & 1 deletion perception/tensorrt_yolo/README.md
@@ -55,6 +55,7 @@ Jocher, G., et al. (2021). ultralytics/yolov5: v6.0 - YOLOv5n 'Nano' models, Rob

| Name | Type | Default Value | Description |
| ----------------------- | ------ | ------------- | ------------------------------------------------------------------ |
| `data_path` | string | "" | Packages data and artifacts directory path |
| `onnx_file` | string | "" | The onnx file name for yolo model |
| `engine_file` | string | "" | The tensorrt engine file name for yolo model |
| `label_file` | string | "" | The label file with label names for detected objects written on it |
@@ -71,7 +72,7 @@ This package includes multiple licenses.

All YOLO ONNX models are converted from the officially trained model. If you need information about training datasets and conditions, please refer to the official repositories.

All models are downloaded automatically when building. When launching the node with a model for the first time, the model is automatically converted to TensorRT, although this may take some time.
All models are downloaded by Ansible during environment preparation (as mentioned in [installation](https://autowarefoundation.github.io/autoware-documentation/main/installation/autoware/source-installation/)). It is also possible to download them manually; see [Manual downloading of artifacts](https://github.com/autowarefoundation/autoware/tree/main/ansible/roles/artifacts). When launching the node with a model for the first time, the model is automatically converted to TensorRT, although this may take some time.
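
A quick sanity check that the Ansible step produced the layout these new defaults expect (directory name assumed from this PR's defaults):

```bash
# Each package now reads its artifacts from a subdirectory of $HOME/autoware_data
ls "$HOME/autoware_data/tensorrt_yolo"
```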

### YOLOv3

9 changes: 5 additions & 4 deletions perception/tensorrt_yolo/launch/tensorrt_yolo.launch.xml
@@ -3,19 +3,20 @@
<arg name="label_file" default="coco.names"/>
<arg name="input_topic" default="/image_raw"/>
<arg name="output_topic" default="rois"/>
<arg name="engine_file" default="$(find-pkg-share tensorrt_yolo)/data/$(var yolo_type).engine"/>
<arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
<arg name="engine_file" default="$(var data_path)/tensorrt_yolo/$(var yolo_type).engine"/>
<arg name="calib_image_directory" default="$(find-pkg-share tensorrt_yolo)/calib_image/"/>
<arg name="mode" default="FP32"/>
<arg name="gpu_id" default="0"/>
<node pkg="tensorrt_yolo" exec="tensorrt_yolo_node" name="$(anon tensorrt_yolo)" output="screen">
<remap from="in/image" to="$(var input_topic)"/>
<remap from="out/objects" to="$(var output_topic)"/>
<remap from="out/image" to="$(var output_topic)/debug/image"/>
<param name="onnx_file" type="str" value="$(find-pkg-share tensorrt_yolo)/data/$(var yolo_type).onnx"/>
<param name="onnx_file" type="str" value="$(var data_path)/tensorrt_yolo/$(var yolo_type).onnx"/>
<param name="engine_file" type="str" value="$(var engine_file)"/>
<param name="label_file" type="str" value="$(find-pkg-share tensorrt_yolo)/data/$(var label_file)"/>
<param name="label_file" type="str" value="$(var data_path)/tensorrt_yolo/$(var label_file)"/>
<param name="calib_image_directory" type="str" value="$(var calib_image_directory)"/>
<param name="calib_cache_file" type="str" value="$(find-pkg-share tensorrt_yolo)/data/$(var yolo_type).cache"/>
<param name="calib_cache_file" type="str" value="$(var data_path)/tensorrt_yolo/$(var yolo_type).cache"/>
<param name="mode" type="str" value="$(var mode)"/>
<param name="gpu_id" type="int" value="$(var gpu_id)"/>
<param from="$(find-pkg-share tensorrt_yolo)/config/$(var yolo_type).param.yaml"/>
4 changes: 2 additions & 2 deletions perception/tensorrt_yolox/README.md
@@ -71,7 +71,7 @@ those are labeled as `UNKNOWN`, while detected rectangles are drawn in the visua

## Onnx model

A sample model (named `yolox-tiny.onnx`) is downloaded automatically during the build process.
A sample model (named `yolox-tiny.onnx`) is downloaded by an Ansible script during environment preparation; if it is missing, follow [Manual downloading of artifacts](https://github.com/autowarefoundation/autoware/tree/main/ansible/roles/artifacts).
To accelerate Non-Maximum Suppression (NMS), a common post-processing step after object detection inference,
an `EfficientNMS_TRT` module is attached after the original YOLOX (tiny) network.
The `EfficientNMS_TRT` module contains fixed values for `score_threshold` and `nms_threshold`,
@@ -146,7 +146,7 @@ Please refer [the official document](https://github.com/Megvii-BaseDetection/YOL

## Label file

A sample label file (named `label.txt`) is also downloaded automatically during the environment preparation process
A sample label file (named `label.txt`)is also downloaded automatically during env preparation process
(**NOTE:** This file is incompatible with models that output labels for the COCO dataset (e.g., models from the official YOLOX repository)).

This file represents the correspondence between class index (integer outputted from YOLOX network) and
Expand Down
3 changes: 2 additions & 1 deletion perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml
@@ -4,7 +4,8 @@
<arg name="input/image" default="/sensing/camera/camera0/image_rect_color"/>
<arg name="output/objects" default="/perception/object_recognition/detection/rois0"/>
<arg name="model_name" default="yolox-sPlus-T4-960x960-pseudo-finetune"/>
<arg name="model_path" default="$(find-pkg-share tensorrt_yolox)/data"/>
<arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
<arg name="model_path" default="$(var data_path)/tensorrt_yolox"/>
<arg name="score_threshold" default="0.35"/>
<arg name="nms_threshold" default="0.7"/>
<arg name="precision" default="int8" description="operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]"/>
3 changes: 2 additions & 1 deletion perception/tensorrt_yolox/launch/yolox_tiny.launch.xml
@@ -3,7 +3,8 @@
<arg name="input/image" default="/sensing/camera/camera0/image_rect_color"/>
<arg name="output/objects" default="/perception/object_recognition/detection/rois0"/>
<arg name="model_name" default="yolox-tiny"/>
<arg name="model_path" default="$(find-pkg-share tensorrt_yolox)/data"/>
<arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
<arg name="model_path" default="$(var data_path)/tensorrt_yolox"/>
<arg name="score_threshold" default="0.35"/>
<arg name="nms_threshold" default="0.7"/>
<arg name="precision" default="fp16" description="operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]"/>
1 change: 1 addition & 0 deletions perception/traffic_light_classifier/README.md
@@ -55,6 +55,7 @@
| Name | Type | Description |
| ----------------- | ---- | ------------------------------------------- |
| `classifier_type` | int | if the value is `1`, cnn_classifier is used |
| `data_path` | str | packages data and artifacts directory path |

### Core Parameters

@@ -147,7 +148,7 @@

#### Prerequisites

**Step 1.** Download and install Miniconda from the [official website](https://mmpretrain.readthedocs.io/en/latest/get_started.html).

**Step 2.** Create a conda virtual environment and activate it

@@ -164,9 +165,9 @@

```bash
conda install pytorch==1.13.1 torchvision==0.14.1 pytorch-cuda=11.6 -c pytorch -c nvidia
```

#### Install mmlab/mmpretrain

**Step 1.** Install mmpretrain from source

```bash
cd ~/
```

@@ -177,7 +178,7 @@

### Training

MMPretrain offers a training script that is controlled through a configuration file.
Leveraging an inheritance design pattern, you can effortlessly tailor the training script
using Python files as configuration files.
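
As a sketch of that inheritance mechanism (the `_base_` file names below are assumed from mmpretrain's stock configs, not taken from this PR):

```python
# configs/mobilenet_v2/mobilenet-v2_8xb32_custom.py
# Inherit the stock MobileNetV2 recipe and override only what differs.
_base_ = [
    '../_base_/models/mobilenet_v2_1x.py',    # model definition (assumed stock file)
    '../_base_/datasets/imagenet_bs32.py',    # data pipeline (assumed stock file)
    '../_base_/schedules/imagenet_bs256.py',  # optimizer and LR schedule
    '../_base_/default_runtime.py',           # logging and checkpointing
]

# Example override: classify 3 traffic-light states instead of the inherited 1000 classes
model = dict(head=dict(num_classes=3, topk=(1,)))
```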

@@ -292,13 +293,13 @@

```bash
python tools/train.py configs/mobilenet_v2/mobilenet-v2_8xb32_custom.py
```

Training logs and weights will be saved in the `work_dirs/mobilenet-v2_8xb32_custom` folder.

### Convert PyTorch model to ONNX model

#### Install mmdeploy

The 'mmdeploy' toolset is designed for deploying your trained model onto various target devices.
With its capabilities, you can seamlessly convert PyTorch models into the ONNX format.

@@ -338,7 +339,7 @@

```bash
--work-dir mmdeploy_model/mobilenet_v2
```

The converted ONNX model will be saved in the `mmdeploy/mmdeploy_model/mobilenet_v2` folder.

After obtaining your ONNX model, update the parameters defined in the launch file (e.g. `model_file_path`, `label_file_path`, `input_h`, `input_w`...).
Note that we only support labels defined in [tier4_perception_msgs::msg::TrafficLightElement](https://github.com/tier4/tier4_autoware_msgs/blob/tier4/universe/tier4_perception_msgs/msg/traffic_light/TrafficLightElement.msg).
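
For instance, pointing the classifier at a retrained model could look like the following (the launch file and model file names here are hypothetical; the argument names come from this PR's launch file changes):

```bash
ros2 launch traffic_light_classifier traffic_light_classifier.launch.xml \
  classifier_model_path:=$HOME/autoware_data/traffic_light_classifier/mobilenet_v2_custom.onnx \
  classifier_label_path:=$HOME/autoware_data/traffic_light_classifier/lamp_labels.txt
```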
@@ -2,8 +2,9 @@
<arg name="input/image" default="~/image_raw"/>
<arg name="input/rois" default="~/rois"/>
<arg name="output/traffic_signals" default="classified/traffic_signals"/>
<arg name="classifier_label_path" default="$(find-pkg-share traffic_light_classifier)/data/lamp_labels.txt" description="classifier label path"/>
<arg name="classifier_model_path" default="$(find-pkg-share traffic_light_classifier)/data/traffic_light_classifier_mobilenetv2_batch_6.onnx" description="classifier onnx model path"/>
<arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
<arg name="classifier_label_path" default="$(var data_path)/traffic_light_classifier/lamp_labels.txt" description="classifier label path"/>
<arg name="classifier_model_path" default="$(var data_path)/traffic_light_classifier/traffic_light_classifier_mobilenetv2_batch_6.onnx" description="classifier onnx model path"/>
<arg name="classifier_precision" default="fp16"/>

<arg name="use_gpu" default="true"/>
13 changes: 7 additions & 6 deletions perception/traffic_light_fine_detector/README.md
@@ -50,12 +50,13 @@ Based on the camera image and the global ROI array detected by `map_based_detect

### Node Parameters

| Name | Type | Default Value | Description |
| -------------------------- | ------ | ------------- | ------------------------------------------------------------------ |
| `fine_detector_model_path` | string | "" | The onnx file name for yolo model |
| `fine_detector_label_path` | string | "" | The label file with label names for detected objects written on it |
| `fine_detector_precision` | string | "fp32" | The inference mode: "fp32", "fp16" |
| `approximate_sync` | bool | false | Flag for whether to use approximate sync policy |
| Name | Type | Default Value | Description |
| -------------------------- | ------ | --------------------------- | ------------------------------------------------------------------ |
| `data_path` | string | "$(env HOME)/autoware_data" | packages data and artifacts directory path |
| `fine_detector_model_path` | string | "" | The onnx file name for yolo model |
| `fine_detector_label_path` | string | "" | The label file with label names for detected objects written on it |
| `fine_detector_precision` | string | "fp32" | The inference mode: "fp32", "fp16" |
| `approximate_sync` | bool | false | Flag for whether to use approximate sync policy |

## Assumptions / Known limits

@@ -1,6 +1,7 @@
<launch>
<arg name="fine_detector_label_path" default="$(find-pkg-share traffic_light_fine_detector)/data/tlr_labels.txt" description="fine detector label path"/>
<arg name="fine_detector_model_path" default="$(find-pkg-share traffic_light_fine_detector)/data/tlr_yolox_s_batch_6.onnx" description="fine detector onnx model path"/>
<arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
<arg name="fine_detector_label_path" default="$(var data_path)/traffic_light_fine_detector/tlr_labels.txt" description="fine detector label path"/>
<arg name="fine_detector_model_path" default="$(var data_path)/traffic_light_fine_detector/tlr_yolox_s_batch_6.onnx" description="fine detector onnx model path"/>
<arg name="fine_detector_precision" default="fp16"/>
<arg name="fine_detector_score_thresh" default="0.3"/>
<arg name="fine_detector_nms_thresh" default="0.65"/>