diff --git a/perception/tensorrt_yolox/CMakeLists.txt b/perception/tensorrt_yolox/CMakeLists.txt
index 21860854ccd5f..a5498a845e62e 100644
--- a/perception/tensorrt_yolox/CMakeLists.txt
+++ b/perception/tensorrt_yolox/CMakeLists.txt
@@ -114,4 +114,5 @@ endif()
ament_auto_package(INSTALL_TO_SHARE
launch
+ config
)
diff --git a/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml b/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml
new file mode 100644
index 0000000000000..bc67173442094
--- /dev/null
+++ b/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml
@@ -0,0 +1,15 @@
+/**:
+ ros__parameters:
+ model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx"
+ label_path: "$(var data_path)/tensorrt_yolox/label.txt"
+ score_threshold: 0.35
+ nms_threshold: 0.7
+ precision: "int8" # Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8].
+ calibration_algorithm: "Entropy" # Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax].
+ dla_core_id: -1 # If positive ID value is specified, the node assigns the inference task to the DLA core.
+ quantize_first_layer: false # If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8.
+ quantize_last_layer: false # If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8.
+ profile_per_layer: false # If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purposes.
+ clip_value: 6.0 # If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration.
+ preprocess_on_gpu: true # If true, pre-processing is performed on GPU.
+ calibration_image_list_path: "" # Path to a file which contains path to images. Those images will be used for int8 quantization.
diff --git a/perception/tensorrt_yolox/config/yolox_tiny.param.yaml b/perception/tensorrt_yolox/config/yolox_tiny.param.yaml
new file mode 100644
index 0000000000000..e45742a7afb95
--- /dev/null
+++ b/perception/tensorrt_yolox/config/yolox_tiny.param.yaml
@@ -0,0 +1,15 @@
+/**:
+ ros__parameters:
+ model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx"
+ label_path: "$(var data_path)/tensorrt_yolox/label.txt"
+ score_threshold: 0.35
+ nms_threshold: 0.7
+ precision: "fp16" # Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8].
+ calibration_algorithm: "MinMax" # Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax].
+ dla_core_id: -1 # If positive ID value is specified, the node assigns the inference task to the DLA core.
+ quantize_first_layer: false # If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8.
+ quantize_last_layer: false # If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8.
+ profile_per_layer: false # If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purposes.
+ clip_value: 0.0 # If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration.
+ preprocess_on_gpu: true # If true, pre-processing is performed on GPU.
+ calibration_image_list_path: "" # Path to a file which contains path to images. Those images will be used for int8 quantization.
diff --git a/perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml b/perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml
index 3f8d7897ab5d3..dd15eda2913ce 100644
--- a/perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml
+++ b/perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml
@@ -5,30 +5,7 @@
-
-
-
-
-
-
-
-
-
-
-
-
+
@@ -40,19 +17,7 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
+
diff --git a/perception/tensorrt_yolox/launch/yolox_tiny.launch.xml b/perception/tensorrt_yolox/launch/yolox_tiny.launch.xml
index 2f08031ea159f..9e5d1c371b13b 100644
--- a/perception/tensorrt_yolox/launch/yolox_tiny.launch.xml
+++ b/perception/tensorrt_yolox/launch/yolox_tiny.launch.xml
@@ -4,30 +4,7 @@
-
-
-
-
-
-
-
-
-
-
-
-
+
@@ -39,19 +16,7 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
+
diff --git a/perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json b/perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json
new file mode 100644
index 0000000000000..ce1ad6c2d0caf
--- /dev/null
+++ b/perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json
@@ -0,0 +1,107 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Parameters for tensorrt_yolox_s_plus_opt Nodes",
+ "type": "object",
+ "definitions": {
+ "yolox_s_plus_opt": {
+ "type": "object",
+ "properties": {
+ "model_path": {
+ "type": "string",
+ "default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
+ "description": "Path to onnx model."
+ },
+ "label_path": {
+ "type": "string",
+ "default": "$(var data_path)/tensorrt_yolox/label.txt",
+ "description": "Path to label file."
+ },
+ "score_threshold": {
+ "type": "number",
+ "default": 0.35,
+ "minimum": 0.0,
+ "maximum": 1.0,
+ "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
+ },
+ "nms_threshold": {
+ "type": "number",
+ "default": 0.7,
+ "minimum": 0.0,
+ "maximum": 1.0,
+ "description": "A threshold value of NMS."
+ },
+ "precision": {
+ "type": "string",
+ "default": "int8",
+ "description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
+ },
+ "calibration_algorithm": {
+ "type": "string",
+ "default": "Entropy",
+ "description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
+ },
+ "dla_core_id": {
+ "type": "number",
+ "default": -1,
+ "description": "If positive ID value is specified, the node assigns the inference task to the DLA core."
+ },
+ "quantize_first_layer": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
+ },
+ "quantize_last_layer": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
+ },
+ "profile_per_layer": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purposes."
+ },
+ "clip_value": {
+ "type": "number",
+ "default": 6.0,
+ "description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
+ },
+ "preprocess_on_gpu": {
+ "type": "boolean",
+ "default": true,
+ "description": "If true, pre-processing is performed on GPU."
+ },
+ "calibration_image_list_path": {
+ "type": "string",
+ "default": "",
+ "description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
+ }
+ },
+ "required": [
+ "model_path",
+ "label_path",
+ "score_threshold",
+ "nms_threshold",
+ "precision",
+ "calibration_algorithm",
+ "dla_core_id",
+ "quantize_first_layer",
+ "quantize_last_layer",
+ "profile_per_layer",
+ "clip_value",
+ "preprocess_on_gpu"
+ ]
+ }
+ },
+ "properties": {
+ "/**": {
+ "type": "object",
+ "properties": {
+ "ros__parameters": {
+ "$ref": "#/definitions/yolox_s_plus_opt"
+ }
+ },
+ "required": ["ros__parameters"]
+ }
+ },
+ "required": ["/**"]
+}
diff --git a/perception/tensorrt_yolox/schema/yolox_tiny.schema.json b/perception/tensorrt_yolox/schema/yolox_tiny.schema.json
new file mode 100644
index 0000000000000..f47b28e47a3f8
--- /dev/null
+++ b/perception/tensorrt_yolox/schema/yolox_tiny.schema.json
@@ -0,0 +1,107 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Parameters for tensorrt_yolox_tiny Nodes",
+ "type": "object",
+ "definitions": {
+ "yolox_tiny": {
+ "type": "object",
+ "properties": {
+ "model_path": {
+ "type": "string",
+ "default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
+ "description": "Path to onnx model."
+ },
+ "label_path": {
+ "type": "string",
+ "default": "$(var data_path)/tensorrt_yolox/label.txt",
+ "description": "Path to label file."
+ },
+ "score_threshold": {
+ "type": "number",
+ "default": 0.35,
+ "minimum": 0.0,
+ "maximum": 1.0,
+ "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
+ },
+ "nms_threshold": {
+ "type": "number",
+ "default": 0.7,
+ "minimum": 0.0,
+ "maximum": 1.0,
+ "description": "A threshold value of NMS."
+ },
+ "precision": {
+ "type": "string",
+ "default": "fp16",
+ "description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
+ },
+ "calibration_algorithm": {
+ "type": "string",
+ "default": "MinMax",
+ "description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
+ },
+ "dla_core_id": {
+ "type": "number",
+ "default": -1,
+ "description": "If positive ID value is specified, the node assigns the inference task to the DLA core."
+ },
+ "quantize_first_layer": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
+ },
+ "quantize_last_layer": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
+ },
+ "profile_per_layer": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purposes."
+ },
+ "clip_value": {
+ "type": "number",
+ "default": 0.0,
+ "description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
+ },
+ "preprocess_on_gpu": {
+ "type": "boolean",
+ "default": true,
+ "description": "If true, pre-processing is performed on GPU."
+ },
+ "calibration_image_list_path": {
+ "type": "string",
+ "default": "",
+ "description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
+ }
+ },
+ "required": [
+ "model_path",
+ "label_path",
+ "score_threshold",
+ "nms_threshold",
+ "precision",
+ "calibration_algorithm",
+ "dla_core_id",
+ "quantize_first_layer",
+ "quantize_last_layer",
+ "profile_per_layer",
+ "clip_value",
+ "preprocess_on_gpu"
+ ]
+ }
+ },
+ "properties": {
+ "/**": {
+ "type": "object",
+ "properties": {
+ "ros__parameters": {
+ "$ref": "#/definitions/yolox_tiny"
+ }
+ },
+ "required": ["ros__parameters"]
+ }
+ },
+ "required": ["/**"]
+}