Skip to content

Commit

Permalink
chore(tensorrt_yolox): rework parameters (#6239)
Browse files Browse the repository at this point in the history
* chore: use config

Signed-off-by: tzhong518 <[email protected]>

* style(pre-commit): autofix

* fix: rename

Signed-off-by: tzhong518 <[email protected]>

* fix: add json schema

Signed-off-by: tzhong518 <[email protected]>

* style(pre-commit): autofix

* fix: add comment to param.yaml

Signed-off-by: tzhong518 <[email protected]>

---------

Signed-off-by: tzhong518 <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: kminoda <[email protected]>
  • Loading branch information
3 people authored Feb 26, 2024
1 parent f95c450 commit 754742e
Show file tree
Hide file tree
Showing 7 changed files with 249 additions and 74 deletions.
1 change: 1 addition & 0 deletions perception/tensorrt_yolox/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -114,4 +114,5 @@ endif()

ament_auto_package(INSTALL_TO_SHARE
launch
config
)
15 changes: 15 additions & 0 deletions perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
/**:
ros__parameters:
model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx"
label_path: "$(var data_path)/tensorrt_yolox/label.txt"
score_threshold: 0.35
nms_threshold: 0.7
precision: "int8" # Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8].
calibration_algorithm: "Entropy" # Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax].
dla_core_id: -1 # If a positive ID value is specified, the node assigns the inference task to the DLA core.
quantize_first_layer: false # If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8.
quantize_last_layer: false # If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8.
profile_per_layer: false # If true, the profiler function will be enabled. Since the profiler function may affect execution speed, it is recommended to set this flag true only for development purposes.
clip_value: 6.0 # If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration.
preprocess_on_gpu: true # If true, pre-processing is performed on GPU.
calibration_image_list_path: "" # Path to a file which contains path to images. Those images will be used for int8 quantization.
15 changes: 15 additions & 0 deletions perception/tensorrt_yolox/config/yolox_tiny.param.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
/**:
ros__parameters:
model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx"
label_path: "$(var data_path)/tensorrt_yolox/label.txt"
score_threshold: 0.35
nms_threshold: 0.7
precision: "fp16" # Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8].
calibration_algorithm: "MinMax" # Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax].
dla_core_id: -1 # If a positive ID value is specified, the node assigns the inference task to the DLA core.
quantize_first_layer: false # If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8.
quantize_last_layer: false # If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8.
profile_per_layer: false # If true, the profiler function will be enabled. Since the profiler function may affect execution speed, it is recommended to set this flag true only for development purposes.
clip_value: 0.0 # If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration.
preprocess_on_gpu: true # If true, pre-processing is performed on GPU.
calibration_image_list_path: "" # Path to a file which contains path to images. Those images will be used for int8 quantization.
39 changes: 2 additions & 37 deletions perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml
Original file line number Diff line number Diff line change
Expand Up @@ -5,30 +5,7 @@
<arg name="output/objects" default="/perception/object_recognition/detection/rois0"/>
<arg name="model_name" default="yolox-sPlus-T4-960x960-pseudo-finetune"/>
<arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
<arg name="model_path" default="$(var data_path)/tensorrt_yolox"/>
<arg name="score_threshold" default="0.35"/>
<arg name="nms_threshold" default="0.7"/>
<arg name="precision" default="int8" description="operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]"/>
<arg
name="calibration_algorithm"
default="Entropy"
description="Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]"
/>
<arg name="dla_core_id" default="-1" description="If positive ID value is specified, the node assign inference task to the DLA core"/>
<arg name="quantize_first_layer" default="false" description="If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8"/>
<arg name="quantize_last_layer" default="false" description="If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8"/>
<arg
name="profile_per_layer"
default="false"
description="If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
/>
<arg
name="clip_value"
default="6.0"
description="If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
/>
<arg name="preprocess_on_gpu" default="true" description="If true, pre-processing is performed on GPU"/>
<arg name="calibration_image_list_path" default="" description="Path to a file which contains path to images. Those images will be used for int8 quantization."/>
<arg name="yolox_param_path" default="$(find-pkg-share tensorrt_yolox)/config/yolox_s_plus_opt.param.yaml"/>
<arg name="use_decompress" default="true" description="use image decompress"/>
<arg name="build_only" default="false" description="exit after trt engine is built"/>

Expand All @@ -40,19 +17,7 @@
<node pkg="tensorrt_yolox" exec="tensorrt_yolox_node_exe" name="tensorrt_yolox" output="screen">
<remap from="~/in/image" to="$(var input/image)"/>
<remap from="~/out/objects" to="$(var output/objects)"/>
<param name="score_threshold" value="$(var score_threshold)"/>
<param name="nms_threshold" value="$(var nms_threshold)"/>
<param name="model_path" value="$(var model_path)/$(var model_name).onnx"/>
<param name="label_path" value="$(var model_path)/label.txt"/>
<param name="precision" value="$(var precision)"/>
<param name="calibration_algorithm" value="$(var calibration_algorithm)"/>
<param name="dla_core_id" value="$(var dla_core_id)"/>
<param name="quantize_first_layer" value="$(var quantize_first_layer)"/>
<param name="quantize_last_layer" value="$(var quantize_last_layer)"/>
<param name="profile_per_layer" value="$(var profile_per_layer)"/>
<param name="clip_value" value="$(var clip_value)"/>
<param name="preprocess_on_gpu" value="$(var preprocess_on_gpu)"/>
<param name="calibration_image_list_path" value="$(var calibration_image_list_path)"/>
<param from="$(var yolox_param_path)" allow_substs="true"/>
<param name="build_only" value="$(var build_only)"/>
</node>
</launch>
39 changes: 2 additions & 37 deletions perception/tensorrt_yolox/launch/yolox_tiny.launch.xml
Original file line number Diff line number Diff line change
Expand Up @@ -4,30 +4,7 @@
<arg name="output/objects" default="/perception/object_recognition/detection/rois0"/>
<arg name="model_name" default="yolox-tiny"/>
<arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
<arg name="model_path" default="$(var data_path)/tensorrt_yolox"/>
<arg name="score_threshold" default="0.35"/>
<arg name="nms_threshold" default="0.7"/>
<arg name="precision" default="fp16" description="operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]"/>
<arg
name="calibration_algorithm"
default="MinMax"
description="Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]"
/>
<arg name="dla_core_id" default="-1" description="If positive ID value is specified, the node assign inference task to the DLA core"/>
<arg name="quantize_first_layer" default="false" description="If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8"/>
<arg name="quantize_last_layer" default="false" description="If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8"/>
<arg
name="profile_per_layer"
default="false"
description="If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
/>
<arg
name="clip_value"
default="0.0"
description="If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
/>
<arg name="preprocess_on_gpu" default="true" description="If true, pre-processing is performed on GPU"/>
<arg name="calibration_image_list_path" default="" description="Path to a file which contains path to images. Those images will be used for int8 quantization."/>
<arg name="yolox_param_path" default="$(find-pkg-share tensorrt_yolox)/config/yolox_tiny.param.yaml"/>
<arg name="use_decompress" default="true" description="use image decompress"/>
<arg name="build_only" default="false" description="exit after trt engine is built"/>

Expand All @@ -39,19 +16,7 @@
<node pkg="tensorrt_yolox" exec="tensorrt_yolox_node_exe" name="tensorrt_yolox" output="screen">
<remap from="~/in/image" to="$(var input/image)"/>
<remap from="~/out/objects" to="$(var output/objects)"/>
<param name="score_threshold" value="$(var score_threshold)"/>
<param name="nms_threshold" value="$(var nms_threshold)"/>
<param name="model_path" value="$(var model_path)/$(var model_name).onnx"/>
<param name="label_path" value="$(var model_path)/label.txt"/>
<param name="precision" value="$(var precision)"/>
<param name="calibration_algorithm" value="$(var calibration_algorithm)"/>
<param name="dla_core_id" value="$(var dla_core_id)"/>
<param name="quantize_first_layer" value="$(var quantize_first_layer)"/>
<param name="quantize_last_layer" value="$(var quantize_last_layer)"/>
<param name="profile_per_layer" value="$(var profile_per_layer)"/>
<param name="clip_value" value="$(var clip_value)"/>
<param name="preprocess_on_gpu" value="$(var preprocess_on_gpu)"/>
<param name="calibration_image_list_path" value="$(var calibration_image_list_path)"/>
<param from="$(var yolox_param_path)" allow_substs="true"/>
<param name="build_only" value="$(var build_only)"/>
</node>
</launch>
107 changes: 107 additions & 0 deletions perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Parameters for tensorrt_yolox_s_plus_opt Nodes",
"type": "object",
"definitions": {
"yolox_s_plus_opt": {
"type": "object",
"properties": {
"model_path": {
"type": "string",
"default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
"description": "Path to onnx model."
},
"label_path": {
"type": "string",
"default": "$(var data_path)/tensorrt_yolox/label.txt",
"description": "Path to label file."
},
"score_threshold": {
"type": "number",
"default": 0.35,
"minimum": 0.0,
"maximum": 1.0,
"description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
},
"nms_threshold": {
"type": "number",
"default": 0.7,
"minimum": 0.0,
"maximum": 1.0,
"description": "A threshold value of NMS."
},
"precision": {
"type": "string",
"default": "int8",
"description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
},
"calibration_algorithm": {
"type": "string",
"default": "Entropy",
"description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
},
"dla_core_id": {
"type": "number",
"default": -1,
"description": "If a positive ID value is specified, the node assigns the inference task to the DLA core."
},
"quantize_first_layer": {
"type": "boolean",
"default": false,
"description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
},
"quantize_last_layer": {
"type": "boolean",
"default": false,
"description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
},
"profile_per_layer": {
"type": "boolean",
"default": false,
"description": "If true, the profiler function will be enabled. Since the profiler function may affect execution speed, it is recommended to set this flag true only for development purposes."
},
"clip_value": {
"type": "number",
"default": 6.0,
"description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
},
"preprocess_on_gpu": {
"type": "boolean",
"default": true,
"description": "If true, pre-processing is performed on GPU."
},
"calibration_image_list_path": {
"type": "string",
"default": "",
"description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
}
},
"required": [
"model_path",
"label_path",
"score_threshold",
"nms_threshold",
"precision",
"calibration_algorithm",
"dla_core_id",
"quantize_first_layer",
"quantize_last_layer",
"profile_per_layer",
"clip_value",
"preprocess_on_gpu"
]
}
},
"properties": {
"/**": {
"type": "object",
"properties": {
"ros__parameters": {
"$ref": "#/definitions/yolox_s_plus_opt"
}
},
"required": ["ros__parameters"]
}
},
"required": ["/**"]
}
107 changes: 107 additions & 0 deletions perception/tensorrt_yolox/schema/yolox_tiny.schema.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Parameters for tensorrt_yolox_tiny Nodes",
"type": "object",
"definitions": {
"yolox_tiny": {
"type": "object",
"properties": {
"model_path": {
"type": "string",
"default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
"description": "Path to onnx model."
},
"label_path": {
"type": "string",
"default": "$(var data_path)/tensorrt_yolox/label.txt",
"description": "Path to label file."
},
"score_threshold": {
"type": "number",
"default": 0.35,
"minimum": 0.0,
"maximum": 1.0,
"description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
},
"nms_threshold": {
"type": "number",
"default": 0.7,
"minimum": 0.0,
"maximum": 1.0,
"description": "A threshold value of NMS."
},
"precision": {
"type": "string",
"default": "fp16",
"description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
},
"calibration_algorithm": {
"type": "string",
"default": "MinMax",
"description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
},
"dla_core_id": {
"type": "number",
"default": -1,
"description": "If a positive ID value is specified, the node assigns the inference task to the DLA core."
},
"quantize_first_layer": {
"type": "boolean",
"default": false,
"description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
},
"quantize_last_layer": {
"type": "boolean",
"default": false,
"description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
},
"profile_per_layer": {
"type": "boolean",
"default": false,
"description": "If true, the profiler function will be enabled. Since the profiler function may affect execution speed, it is recommended to set this flag true only for development purposes."
},
"clip_value": {
"type": "number",
"default": 0.0,
"description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
},
"preprocess_on_gpu": {
"type": "boolean",
"default": true,
"description": "If true, pre-processing is performed on GPU."
},
"calibration_image_list_path": {
"type": "string",
"default": "",
"description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
}
},
"required": [
"model_path",
"label_path",
"score_threshold",
"nms_threshold",
"precision",
"calibration_algorithm",
"dla_core_id",
"quantize_first_layer",
"quantize_last_layer",
"profile_per_layer",
"clip_value",
"preprocess_on_gpu"
]
}
},
"properties": {
"/**": {
"type": "object",
"properties": {
"ros__parameters": {
"$ref": "#/definitions/yolox_tiny"
}
},
"required": ["ros__parameters"]
}
},
"required": ["/**"]
}

0 comments on commit 754742e

Please sign in to comment.