From 1950ae60bd42c440532a2354d2a1e6861af2e750 Mon Sep 17 00:00:00 2001
From: Adam Serafin <adam.serafin@luxonis.com>
Date: Wed, 24 Jan 2024 10:33:48 +0100
Subject: [PATCH] SR-LR Update Iron (#482)

---
 depthai_bridge/src/ImuConverter.cpp           |   8 +-
 depthai_bridge/src/TFPublisher.cpp            |   2 +-
 .../urdf/include/depthai_macro.urdf.xacro     |  66 +++++---
 depthai_examples/CMakeLists.txt               |   3 +
 .../launch/yolov4_publisher.launch.py         |  54 ++++--
 depthai_examples/src/yolov4_publisher.cpp     | 141 +++++++++++++++
 .../src/yolov4_spatial_publisher.cpp          | 160 +++++++-----------
 depthai_ros_driver/config/oak_d_sr.yaml       |   6 +
 depthai_ros_driver/config/sr_rgbd.yaml        |   9 +
 .../dai_nodes/nn/detection.hpp                |  17 +-
 .../dai_nodes/nn/nn_wrapper.hpp               |   6 +-
 .../dai_nodes/nn/segmentation.hpp             |   6 +-
 .../dai_nodes/nn/spatial_detection.hpp        |  25 ++-
 .../dai_nodes/nn/spatial_nn_wrapper.hpp       |   6 +-
 .../dai_nodes/sensors/sensor_wrapper.hpp      |   2 +-
 .../depthai_ros_driver/dai_nodes/stereo.hpp   |   2 +-
 .../param_handlers/imu_param_handler.hpp      |   1 +
 .../param_handlers/nn_param_handler.hpp       |   3 +-
 .../param_handlers/stereo_param_handler.hpp   |   3 +-
 depthai_ros_driver/launch/camera.launch.py    |   8 +-
 depthai_ros_driver/launch/rgbd_pcl.launch.py  |   1 +
 .../launch/sr_rgbd_pcl.launch.py              |  89 ++++++++++
 depthai_ros_driver/src/camera.cpp             |   4 +-
 .../src/dai_nodes/nn/nn_wrapper.cpp           |   5 +-
 .../src/dai_nodes/nn/segmentation.cpp         |   7 +-
 .../src/dai_nodes/nn/spatial_nn_wrapper.cpp   |   7 +-
 .../src/dai_nodes/sensors/sensor_helpers.cpp  |   2 +-
 .../src/dai_nodes/sensors/sensor_wrapper.cpp  |  14 ++
 depthai_ros_driver/src/dai_nodes/stereo.cpp   |  28 ++-
 .../param_handlers/camera_param_handler.cpp   |   1 +
 .../src/param_handlers/nn_param_handler.cpp   |   4 +-
 .../param_handlers/sensor_param_handler.cpp   |  18 +-
 .../param_handlers/stereo_param_handler.cpp   |  10 +-
 .../src/pipeline/pipeline_generator.cpp       |   4 +-
 34 files changed, 536 insertions(+), 186 deletions(-)
 create mode 100644 depthai_examples/src/yolov4_publisher.cpp
 create mode 100644 depthai_ros_driver/config/oak_d_sr.yaml
 create mode 100644 depthai_ros_driver/config/sr_rgbd.yaml
 create mode 100644 depthai_ros_driver/launch/sr_rgbd_pcl.launch.py

diff --git a/depthai_bridge/src/ImuConverter.cpp b/depthai_bridge/src/ImuConverter.cpp
index 31b02d72..f1d7e241 100644
--- a/depthai_bridge/src/ImuConverter.cpp
+++ b/depthai_bridge/src/ImuConverter.cpp
@@ -135,13 +135,7 @@ void ImuConverter::toRosDaiMsg(std::shared_ptr<dai::IMUData> inData, std::deque<
                 tstamp = accel.getTimestampDevice();
             else
                 tstamp = accel.getTimestamp();
-            if(_enable_rotation) {
-                auto rot = inData->packets[i].rotationVector;
-                auto magn = inData->packets[i].magneticField;
-                CreateUnitMessage(msg, tstamp, accel, gyro, rot, magn);
-            } else {
-                CreateUnitMessage(msg, tstamp, accel, gyro);
-            }
+            CreateUnitMessage(msg, tstamp, accel, gyro, rot, magn);
             outImuMsgs.push_back(msg);
         }
     }
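
With the branch removed, CreateUnitMessage() always receives rotation and
magnetometer data. This assumes rot and magn are now extracted for every
packet earlier in the loop, outside this hunk; a minimal sketch, reusing the
removed lines:

    auto rot = inData->packets[i].rotationVector;
    auto magn = inData->packets[i].magneticField;
    CreateUnitMessage(msg, tstamp, accel, gyro, rot, magn);
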
diff --git a/depthai_bridge/src/TFPublisher.cpp b/depthai_bridge/src/TFPublisher.cpp
index 730d5793..3a3f32cd 100644
--- a/depthai_bridge/src/TFPublisher.cpp
+++ b/depthai_bridge/src/TFPublisher.cpp
@@ -16,7 +16,7 @@
 #include "rclcpp/rclcpp.hpp"
 #include "tf2/LinearMath/Matrix3x3.h"
 #include "tf2/LinearMath/Quaternion.h"
-#include "tf2_geometry_msgs/tf2_geometry_msgs/tf2_geometry_msgs.hpp"
+#include "tf2_geometry_msgs/tf2_geometry_msgs.hpp"
 
 namespace dai {
 namespace ros {
diff --git a/depthai_descriptions/urdf/include/depthai_macro.urdf.xacro b/depthai_descriptions/urdf/include/depthai_macro.urdf.xacro
index 55118df1..9416e4e5 100644
--- a/depthai_descriptions/urdf/include/depthai_macro.urdf.xacro
+++ b/depthai_descriptions/urdf/include/depthai_macro.urdf.xacro
@@ -12,6 +12,10 @@
         <xacro:property name="has_imu" value="false" />
         <xacro:property name="baseline" value="0.075" />
 
+        <xacro:if value="${model in ['OAK-D-SR']}">
+            <xacro:property name="baseline" value="0.02" />
+        </xacro:if>
+
         <xacro:if value="${model in ['OAK-D', 'OAK-D-PRO', 'OAK-D-POE']}">
             <xacro:property name="has_imu" value="true" />
         </xacro:if>
@@ -19,22 +23,25 @@
         <xacro:base camera_name="${camera_name}" parent="${parent}" camera_model="${camera_model}" base_frame="${base_frame}" cam_pos_x="${cam_pos_x}" cam_pos_y="${cam_pos_y}" cam_pos_z="${cam_pos_z}" cam_roll="${cam_roll}" cam_pitch="${cam_pitch}" cam_yaw="${cam_yaw}" has_imu="${has_imu}"/>
 
         <!-- RGB Camera -->
-        <link name="${camera_name}_rgb_camera_frame" />
-
-        <joint name="${camera_name}_rgb_camera_joint" type="fixed">
-            <parent link="${base_frame}"/>
-            <child link="${camera_name}_rgb_camera_frame"/>
-            <origin xyz="0 0 0" rpy="0 0 0" />
-        </joint>
-
-        <link name="${camera_name}_rgb_camera_optical_frame"/>
-
-        <joint name="${camera_name}_rgb_camera_optical_joint" type="fixed">
-            <origin xyz="0 0 0" rpy="-${M_PI/2} 0.0 -${M_PI/2}"/>
-            <parent link="${camera_name}_rgb_camera_frame"/>
-            <child link="${camera_name}_rgb_camera_optical_frame"/>
-        </joint>
-
+        <xacro:unless value="${model in ['OAK-D-SR']}">
+            <link name="${camera_name}_rgb_camera_frame" />
+
+            <joint name="${camera_name}_rgb_camera_joint" type="fixed">
+                <parent link="${base_frame}"/>
+                <child link="${camera_name}_rgb_camera_frame"/>
+                <origin xyz="0 0 0" rpy="0 0 0" />
+            </joint>
+
+            <link name="${camera_name}_rgb_camera_optical_frame"/>
+
+            <joint name="${camera_name}_rgb_camera_optical_joint" type="fixed">
+                <origin xyz="0 0 0" rpy="-${M_PI/2} 0.0 -${M_PI/2}"/>
+                <parent link="${camera_name}_rgb_camera_frame"/>
+                <child link="${camera_name}_rgb_camera_optical_frame"/>
+            </joint>
+        </xacro:unless>
+        <xacro:unless value="${model in ['OAK-D-LR']}">
+            
         <!-- Left Camera -->
         <link name="${camera_name}_left_camera_frame" />
 
@@ -70,16 +77,34 @@
             <child link="${camera_name}_right_camera_optical_frame"/>
         </joint>
 
+        </xacro:unless>
 
-    </xacro:macro>
+        <xacro:if value="${model in ['OAK-D-LR']}">
 
-        <!-- right Camera -->
+        <!-- left Camera -->
+        <link name="${camera_name}_left_camera_frame" />
+
+        <joint name="${camera_name}_left_camera_joint" type="fixed">
+            <parent link="${base_frame}"/>
+            <child link="${camera_name}_left_camera_frame"/>
+            <origin xyz="0 0.1 0" rpy="0 0 0" />
+        </joint>
+
+        <link name="${camera_name}_left_camera_optical_frame"/>
+
+        <joint name="${camera_name}_left_camera_optical_joint" type="fixed">
+            <origin xyz="0 0 0" rpy="-${M_PI/2} 0.0 -${M_PI/2}"/>
+            <parent link="${camera_name}_left_camera_frame"/>
+            <child link="${camera_name}_left_camera_optical_frame"/>
+        </joint>
+
+        <!-- right Camera -->
         <link name="${camera_name}_right_camera_frame" />
 
         <joint name="${camera_name}_right_camera_joint" type="fixed">
             <parent link="${base_frame}"/>
             <child link="${camera_name}_right_camera_frame"/>
-            <origin xyz="0 -${baseline/2} 0" rpy="0 0 0" />
+            <origin xyz="0 -0.05 0" rpy="0 0 0" />
         </joint>
 
         <link name="${camera_name}_right_camera_optical_frame"/>
@@ -89,8 +114,7 @@
             <parent link="${camera_name}_right_camera_frame"/>
             <child link="${camera_name}_right_camera_optical_frame"/>
         </joint>
-
-
+    </xacro:if>
     </xacro:macro>
 
 </robot>
\ No newline at end of file
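
For the OAK-D-SR, the new conditional overrides the default 0.075 m baseline
with 0.02 m. For the OAK-D-LR, the camera frames are hard-coded rather than
derived from ${baseline}: the left camera sits at y = +0.1 m and the right at
y = -0.05 m relative to the base frame, i.e. a 0.1 - (-0.05) = 0.15 m stereo
baseline that is off-center with respect to base_frame, presumably matching
the physical sensor layout.
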
diff --git a/depthai_examples/CMakeLists.txt b/depthai_examples/CMakeLists.txt
index 61273f8b..fd2e230f 100644
--- a/depthai_examples/CMakeLists.txt
+++ b/depthai_examples/CMakeLists.txt
@@ -101,9 +101,11 @@ dai_add_node_ros2(stereo_inertial_node src/stereo_inertial_publisher.cpp)
 dai_add_node_ros2(feature_tracker src/feature_tracker_publisher.cpp)
 dai_add_node_ros2(stereo_node src/stereo_publisher.cpp)
 dai_add_node_ros2(yolov4_spatial_node src/yolov4_spatial_publisher.cpp)
+dai_add_node_ros2(yolov4_node src/yolov4_publisher.cpp)
 
 target_compile_definitions(mobilenet_node PRIVATE BLOB_NAME="${mobilenet_blob_name}")
 target_compile_definitions(yolov4_spatial_node PRIVATE BLOB_NAME="${tiny_yolo_v4_blob_name}")
+target_compile_definitions(yolov4_node PRIVATE BLOB_NAME="${tiny_yolo_v4_blob_name}")
 target_compile_definitions(stereo_inertial_node PRIVATE BLOB_NAME="${tiny_yolo_v4_blob_name}")
 
 if($ENV{ROS_DISTRO} STREQUAL "galactic")
@@ -124,6 +126,7 @@ install(TARGETS
         stereo_inertial_node
         stereo_node
         yolov4_spatial_node
+        yolov4_node
         feature_tracker
         DESTINATION lib/${PROJECT_NAME})
 
diff --git a/depthai_examples/launch/yolov4_publisher.launch.py b/depthai_examples/launch/yolov4_publisher.launch.py
index 683379ba..79a3e773 100644
--- a/depthai_examples/launch/yolov4_publisher.launch.py
+++ b/depthai_examples/launch/yolov4_publisher.launch.py
@@ -4,6 +4,7 @@
 from launch import LaunchDescription, launch_description_sources
 from launch.actions import IncludeLaunchDescription
 from launch.actions import DeclareLaunchArgument
+from launch.conditions import IfCondition, UnlessCondition
 from launch.substitutions import LaunchConfiguration
 import launch_ros.actions
 import launch_ros.descriptions
@@ -131,7 +132,14 @@ def generate_launch_description():
         'monoResolution',
         default_value=monoResolution,
         description='Contains the resolution of the Mono Cameras. Available resolutions are 800p, 720p & 400p for OAK-D & 480p for OAK-D-Lite.')
-  
+
+    # declare_spatial_camera_cmd = DeclareLaunchArgument(
+    #     'spatial_camera',
+    #     default_value="true",
+    #     description='Enable spatial camera'
+    # )
+
+
     urdf_launch = IncludeLaunchDescription(
                             launch_description_sources.PythonLaunchDescriptionSource(
                                     os.path.join(urdf_launch_dir, 'urdf_launch.py')),
@@ -146,15 +154,32 @@ def generate_launch_description():
                                               'cam_pitch'   : cam_pitch,
                                               'cam_yaw'     : cam_yaw}.items())
     yolov4_spatial_node = launch_ros.actions.Node(
-            package='depthai_examples', executable='yolov4_spatial_node',
-            output='screen',
-            parameters=[{'tf_prefix': tf_prefix},
-                        {'camera_param_uri': camera_param_uri},
-                        {'sync_nn': sync_nn},
-                        {'nnName': nnName},
-                        {'resourceBaseFolder': resourceBaseFolder},
-                        {'monoResolution': monoResolution},
-                        {'spatial_camera': spatial_camera}])
+        package='depthai_examples', executable='yolov4_spatial_node',
+        output='screen',
+        parameters=[{'tf_prefix': tf_prefix},
+                    {'camera_param_uri': camera_param_uri},
+                    {'sync_nn': sync_nn},
+                    {'nnName': nnName},
+                    {'resourceBaseFolder': resourceBaseFolder},
+                    {'monoResolution': monoResolution},
+                    {'subpixel': subpixel},
+                    {'lrCheckTresh': lrCheckTresh},
+                    {'confidence': confidence},
+                    ],
+        condition=IfCondition(LaunchConfiguration('spatial_camera'))
+    )
+
+    yolov4_node = launch_ros.actions.Node(
+        package='depthai_examples', executable='yolov4_node',
+        output='screen',
+        parameters=[{'tf_prefix': tf_prefix},
+                    {'camera_param_uri': camera_param_uri},
+                    {'sync_nn': sync_nn},
+                    {'nnName': nnName},
+                    {'resourceBaseFolder': resourceBaseFolder},
+                    ],
+        condition=UnlessCondition(LaunchConfiguration('spatial_camera'))
+    )
 
     rviz_node = launch_ros.actions.Node(
             package='rviz2', executable='rviz2', output='screen',
@@ -181,11 +206,12 @@ def generate_launch_description():
     ld.add_action(declare_sync_nn_cmd)
     ld.add_action(urdf_launch)
 
-    if spatial_camera == True:
-        ld.add_action(declare_subpixel_cmd)
-        ld.add_action(declare_lrCheckTresh_cmd)
-        ld.add_action(declare_monoResolution_cmd)
+    ld.add_action(declare_subpixel_cmd)
+    ld.add_action(declare_lrCheckTresh_cmd)
+    ld.add_action(declare_monoResolution_cmd)
+
     ld.add_action(yolov4_spatial_node)
+    ld.add_action(yolov4_node)
 
     return ld
 
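Both nodes are now always added to the launch description; the paired
IfCondition/UnlessCondition on 'spatial_camera' ensures that exactly one of
them actually starts. Assuming 'spatial_camera' defaults to 'true' earlier in
the file (its DeclareLaunchArgument is commented out above), the non-spatial
node is selected with:

    ros2 launch depthai_examples yolov4_publisher.launch.py spatial_camera:=false
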
diff --git a/depthai_examples/src/yolov4_publisher.cpp b/depthai_examples/src/yolov4_publisher.cpp
new file mode 100644
index 00000000..2147c521
--- /dev/null
+++ b/depthai_examples/src/yolov4_publisher.cpp
@@ -0,0 +1,141 @@
+
+#include <cstdio>
+#include <iostream>
+
+#include "camera_info_manager/camera_info_manager.hpp"
+#include "depthai_bridge/BridgePublisher.hpp"
+#include "depthai_bridge/ImageConverter.hpp"
+#include "depthai_bridge/ImgDetectionConverter.hpp"
+#include "rclcpp/executors.hpp"
+#include "rclcpp/node.hpp"
+#include "sensor_msgs/msg/image.hpp"
+#include "vision_msgs/msg/detection2_d_array.hpp"
+
+// Includes common necessary includes for development using the depthai library
+#include "depthai/device/DataQueue.hpp"
+#include "depthai/device/Device.hpp"
+#include "depthai/pipeline/Pipeline.hpp"
+#include "depthai/pipeline/node/ColorCamera.hpp"
+#include "depthai/pipeline/node/DetectionNetwork.hpp"
+#include "depthai/pipeline/node/XLinkOut.hpp"
+
+const std::vector<std::string> label_map = {
+    "person",        "bicycle",      "car",           "motorbike",     "aeroplane",   "bus",         "train",       "truck",        "boat",
+    "traffic light", "fire hydrant", "stop sign",     "parking meter", "bench",       "bird",        "cat",         "dog",          "horse",
+    "sheep",         "cow",          "elephant",      "bear",          "zebra",       "giraffe",     "backpack",    "umbrella",     "handbag",
+    "tie",           "suitcase",     "frisbee",       "skis",          "snowboard",   "sports ball", "kite",        "baseball bat", "baseball glove",
+    "skateboard",    "surfboard",    "tennis racket", "bottle",        "wine glass",  "cup",         "fork",        "knife",        "spoon",
+    "bowl",          "banana",       "apple",         "sandwich",      "orange",      "broccoli",    "carrot",      "hot dog",      "pizza",
+    "donut",         "cake",         "chair",         "sofa",          "pottedplant", "bed",         "diningtable", "toilet",       "tvmonitor",
+    "laptop",        "mouse",        "remote",        "keyboard",      "cell phone",  "microwave",   "oven",        "toaster",      "sink",
+    "refrigerator",  "book",         "clock",         "vase",          "scissors",    "teddy bear",  "hair drier",  "toothbrush"};
+
+dai::Pipeline createPipeline(bool syncNN, std::string nnPath) {
+    dai::Pipeline pipeline;
+    auto colorCam = pipeline.create<dai::node::ColorCamera>();
+    auto detectionNetwork = pipeline.create<dai::node::YoloDetectionNetwork>();
+
+    // create xlink connections
+    auto xoutRgb = pipeline.create<dai::node::XLinkOut>();
+    auto xoutNN = pipeline.create<dai::node::XLinkOut>();
+
+    xoutRgb->setStreamName("preview");
+    xoutNN->setStreamName("detections");
+
+    // Properties
+    colorCam->setPreviewSize(416, 416);
+    colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
+    colorCam->setInterleaved(false);
+    colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR);
+    colorCam->setFps(40);
+
+    // Network specific settings
+    detectionNetwork->setConfidenceThreshold(0.5f);
+    detectionNetwork->setNumClasses(80);
+    detectionNetwork->setCoordinateSize(4);
+    detectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319});
+    detectionNetwork->setAnchorMasks({{"side26", {1, 2, 3}}, {"side13", {3, 4, 5}}});
+    detectionNetwork->setIouThreshold(0.5f);
+    detectionNetwork->setBlobPath(nnPath);
+    detectionNetwork->setNumInferenceThreads(2);
+    detectionNetwork->input.setBlocking(false);
+
+    // Linking
+    colorCam->preview.link(detectionNetwork->input);
+    if(syncNN)
+        detectionNetwork->passthrough.link(xoutRgb->input);
+    else
+        colorCam->preview.link(xoutRgb->input);
+
+    detectionNetwork->out.link(xoutNN->input);
+    return pipeline;
+}
+
+int main(int argc, char** argv) {
+    rclcpp::init(argc, argv);
+    auto node = rclcpp::Node::make_shared("yolov4_node");
+
+    std::string tfPrefix, resourceBaseFolder, nnPath;
+    std::string camera_param_uri;
+    std::string nnName(BLOB_NAME);  // Set your blob name for the model here
+    bool syncNN;
+    std::string monoResolution = "400p";
+
+    node->declare_parameter("tf_prefix", "oak");
+    node->declare_parameter("camera_param_uri", camera_param_uri);
+    node->declare_parameter("sync_nn", true);
+    node->declare_parameter("nnName", "");
+    node->declare_parameter("resourceBaseFolder", "");
+
+    node->get_parameter("tf_prefix", tfPrefix);
+    node->get_parameter("camera_param_uri", camera_param_uri);
+    node->get_parameter("sync_nn", syncNN);
+    node->get_parameter("resourceBaseFolder", resourceBaseFolder);
+
+    if(resourceBaseFolder.empty()) {
+        throw std::runtime_error("Send the path to the resource folder containing NNBlob in \'resourceBaseFolder\' ");
+    }
+
+    std::string nnParam;
+    node->get_parameter("nnName", nnParam);
+    if(nnParam != "x") {
+        node->get_parameter("nnName", nnName);
+    }
+
+    nnPath = resourceBaseFolder + "/" + nnName;
+    dai::Pipeline pipeline = createPipeline(syncNN, nnPath);
+    dai::Device device(pipeline);
+
+    auto colorQueue = device.getOutputQueue("preview", 30, false);
+    auto detectionQueue = device.getOutputQueue("detections", 30, false);
+    auto calibrationHandler = device.readCalibration();
+
+    dai::rosBridge::ImageConverter rgbConverter(tfPrefix + "_rgb_camera_optical_frame", false);
+    auto rgbCameraInfo = rgbConverter.calibrationToCameraInfo(calibrationHandler, dai::CameraBoardSocket::CAM_A, -1, -1);
+    dai::rosBridge::BridgePublisher<sensor_msgs::msg::Image, dai::ImgFrame> rgbPublish(colorQueue,
+                                                                                       node,
+                                                                                       std::string("color/image"),
+                                                                                       std::bind(&dai::rosBridge::ImageConverter::toRosMsg,
+                                                                                                 &rgbConverter,  // since the converter has the same frame name
+                                                                                                                 // and image type, we can reuse it
+                                                                                                 std::placeholders::_1,
+                                                                                                 std::placeholders::_2),
+                                                                                       30,
+                                                                                       rgbCameraInfo,
+                                                                                       "color");
+
+    dai::rosBridge::ImgDetectionConverter detConverter(tfPrefix + "_rgb_camera_optical_frame", 416, 416, false);
+    dai::rosBridge::BridgePublisher<vision_msgs::msg::Detection2DArray, dai::ImgDetections> detectionPublish(
+        detectionQueue,
+        node,
+        std::string("color/yolov4_detections"),
+        std::bind(&dai::rosBridge::ImgDetectionConverter::toRosMsg, &detConverter, std::placeholders::_1, std::placeholders::_2),
+        30);
+
+    detectionPublish.addPublisherCallback();
+    rgbPublish.addPublisherCallback();  // addPublisherCallback works only when the dataqueue is non blocking.
+
+    rclcpp::spin(node);
+
+    return 0;
+}
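
The node resolves its blob as resourceBaseFolder + "/" + nnName, so a direct
invocation looks like the following (path and blob file name are
illustrative):

    ros2 run depthai_examples yolov4_node --ros-args \
        -p resourceBaseFolder:=/path/to/resources -p nnName:=tiny-yolo-v4.blob
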
diff --git a/depthai_examples/src/yolov4_spatial_publisher.cpp b/depthai_examples/src/yolov4_spatial_publisher.cpp
index 1d7df58e..fe07bb9f 100644
--- a/depthai_examples/src/yolov4_spatial_publisher.cpp
+++ b/depthai_examples/src/yolov4_spatial_publisher.cpp
@@ -34,13 +34,11 @@ const std::vector<std::string> label_map = {
     "laptop",        "mouse",        "remote",        "keyboard",      "cell phone",  "microwave",   "oven",        "toaster",      "sink",
     "refrigerator",  "book",         "clock",         "vase",          "scissors",    "teddy bear",  "hair drier",  "toothbrush"};
 
-dai::Pipeline createPipeline(bool spatial_camera, bool syncNN, bool subpixel, std::string nnPath, int confidence, int LRchecktresh, std::string resolution) {
+dai::Pipeline createPipeline(bool syncNN, bool subpixel, std::string nnPath, int confidence, int LRchecktresh, std::string resolution) {
     dai::Pipeline pipeline;
     dai::node::MonoCamera::Properties::SensorResolution monoResolution;
     auto colorCam = pipeline.create<dai::node::ColorCamera>();
-    auto camRgb = pipeline.create<dai::node::ColorCamera>();  // non spatial add in
     auto spatialDetectionNetwork = pipeline.create<dai::node::YoloSpatialDetectionNetwork>();
-    auto detectionNetwork = pipeline.create<dai::node::YoloDetectionNetwork>();
     auto monoLeft = pipeline.create<dai::node::MonoCamera>();
     auto monoRight = pipeline.create<dai::node::MonoCamera>();
     auto stereo = pipeline.create<dai::node::StereoDepth>();
@@ -49,104 +47,71 @@ dai::Pipeline createPipeline(bool spatial_camera, bool syncNN, bool subpixel, st
     auto xoutRgb = pipeline.create<dai::node::XLinkOut>();
     auto xoutDepth = pipeline.create<dai::node::XLinkOut>();
     auto xoutNN = pipeline.create<dai::node::XLinkOut>();
-    auto nnOut = pipeline.create<dai::node::XLinkOut>();  // non spatial add in
 
     xoutRgb->setStreamName("preview");
     xoutNN->setStreamName("detections");
-    if(spatial_camera == true) {
-        xoutDepth->setStreamName("depth");
-
-        colorCam->setPreviewSize(416, 416);
-        colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
-        colorCam->setInterleaved(false);
-        colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR);
-
-        if(resolution == "720p") {
-            monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_720_P;
-        } else if(resolution == "400p") {
-            monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_400_P;
-        } else if(resolution == "800p") {
-            monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_800_P;
-        } else if(resolution == "480p") {
-            monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_480_P;
-        } else {
-            RCLCPP_ERROR(rclcpp::get_logger("rclcpp"), "Invalid parameter. -> monoResolution: %s", resolution.c_str());
-            throw std::runtime_error("Invalid mono camera resolution.");
-        }
-
-        monoLeft->setResolution(monoResolution);
-        monoLeft->setBoardSocket(dai::CameraBoardSocket::CAM_B);
-        monoRight->setResolution(monoResolution);
-        monoRight->setBoardSocket(dai::CameraBoardSocket::CAM_C);
-
-        /// setting node configs
-        stereo->initialConfig.setConfidenceThreshold(confidence);
-        stereo->setRectifyEdgeFillColor(0);  // black, to better see the cutout
-        stereo->initialConfig.setLeftRightCheckThreshold(LRchecktresh);
-        stereo->setSubpixel(subpixel);
-        stereo->setDepthAlign(dai::CameraBoardSocket::CAM_A);
-
-        spatialDetectionNetwork->setBlobPath(nnPath);
-        spatialDetectionNetwork->setConfidenceThreshold(0.5f);
-        spatialDetectionNetwork->input.setBlocking(false);
-        spatialDetectionNetwork->setBoundingBoxScaleFactor(0.5);
-        spatialDetectionNetwork->setDepthLowerThreshold(100);
-        spatialDetectionNetwork->setDepthUpperThreshold(5000);
-
-        // yolo specific parameters
-        spatialDetectionNetwork->setNumClasses(80);
-        spatialDetectionNetwork->setCoordinateSize(4);
-        spatialDetectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319});
-        spatialDetectionNetwork->setAnchorMasks({{"side13", {3, 4, 5}}, {"side26", {1, 2, 3}}});
-        spatialDetectionNetwork->setIouThreshold(0.5f);
-
-        // Link plugins CAM -> STEREO -> XLINK
-        monoLeft->out.link(stereo->left);
-        monoRight->out.link(stereo->right);
-
-        // Link plugins CAM -> NN -> XLINK
-        colorCam->preview.link(spatialDetectionNetwork->input);
-        if(syncNN)
-            spatialDetectionNetwork->passthrough.link(xoutRgb->input);
-        else
-            colorCam->preview.link(xoutRgb->input);
-
-        spatialDetectionNetwork->out.link(xoutNN->input);
-
-        stereo->depth.link(spatialDetectionNetwork->inputDepth);
-        spatialDetectionNetwork->passthroughDepth.link(xoutDepth->input);
+    xoutDepth->setStreamName("depth");
+
+    colorCam->setPreviewSize(416, 416);
+    colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
+    colorCam->setInterleaved(false);
+    colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR);
+
+    if(resolution == "720p") {
+        monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_720_P;
+    } else if(resolution == "400p") {
+        monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_400_P;
+    } else if(resolution == "800p") {
+        monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_800_P;
+    } else if(resolution == "480p") {
+        monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_480_P;
     } else {
-        xoutRgb->setStreamName("rgb");
-        nnOut->setStreamName("detections");
-
-        // Properties
-        camRgb->setPreviewSize(416, 416);
-        camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
-        camRgb->setInterleaved(false);
-        camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR);
-        camRgb->setFps(40);
-
-        // Network specific settings
-        detectionNetwork->setConfidenceThreshold(0.5f);
-        detectionNetwork->setNumClasses(80);
-        detectionNetwork->setCoordinateSize(4);
-        detectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319});
-        detectionNetwork->setAnchorMasks({{"side26", {1, 2, 3}}, {"side13", {3, 4, 5}}});
-        detectionNetwork->setIouThreshold(0.5f);
-        detectionNetwork->setBlobPath(nnPath);
-        detectionNetwork->setNumInferenceThreads(2);
-        detectionNetwork->input.setBlocking(false);
-
-        // Linking
-        camRgb->preview.link(detectionNetwork->input);
-        if(syncNN)
-            detectionNetwork->passthrough.link(xoutRgb->input);
-        else
-            camRgb->preview.link(xoutRgb->input);
-
-        detectionNetwork->out.link(nnOut->input);
+        RCLCPP_ERROR(rclcpp::get_logger("rclcpp"), "Invalid parameter. -> monoResolution: %s", resolution.c_str());
+        throw std::runtime_error("Invalid mono camera resolution.");
     }
 
+    monoLeft->setResolution(monoResolution);
+    monoLeft->setBoardSocket(dai::CameraBoardSocket::CAM_B);
+    monoRight->setResolution(monoResolution);
+    monoRight->setBoardSocket(dai::CameraBoardSocket::CAM_C);
+
+    /// setting node configs
+    stereo->initialConfig.setConfidenceThreshold(confidence);
+    stereo->setRectifyEdgeFillColor(0);  // black, to better see the cutout
+    stereo->initialConfig.setLeftRightCheckThreshold(LRchecktresh);
+    stereo->setSubpixel(subpixel);
+    stereo->setDepthAlign(dai::CameraBoardSocket::CAM_A);
+
+    spatialDetectionNetwork->setBlobPath(nnPath);
+    spatialDetectionNetwork->setConfidenceThreshold(0.5f);
+    spatialDetectionNetwork->input.setBlocking(false);
+    spatialDetectionNetwork->setBoundingBoxScaleFactor(0.5);
+    spatialDetectionNetwork->setDepthLowerThreshold(100);
+    spatialDetectionNetwork->setDepthUpperThreshold(5000);
+
+    // yolo specific parameters
+    spatialDetectionNetwork->setNumClasses(80);
+    spatialDetectionNetwork->setCoordinateSize(4);
+    spatialDetectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319});
+    spatialDetectionNetwork->setAnchorMasks({{"side13", {3, 4, 5}}, {"side26", {1, 2, 3}}});
+    spatialDetectionNetwork->setIouThreshold(0.5f);
+
+    // Link plugins CAM -> STEREO -> XLINK
+    monoLeft->out.link(stereo->left);
+    monoRight->out.link(stereo->right);
+
+    // Link plugins CAM -> NN -> XLINK
+    colorCam->preview.link(spatialDetectionNetwork->input);
+    if(syncNN)
+        spatialDetectionNetwork->passthrough.link(xoutRgb->input);
+    else
+        colorCam->preview.link(xoutRgb->input);
+
+    spatialDetectionNetwork->out.link(xoutNN->input);
+
+    stereo->depth.link(spatialDetectionNetwork->inputDepth);
+    spatialDetectionNetwork->passthroughDepth.link(xoutDepth->input);
+
     return pipeline;
 }
 
@@ -157,7 +122,7 @@ int main(int argc, char** argv) {
     std::string tfPrefix, resourceBaseFolder, nnPath;
     std::string camera_param_uri;
     std::string nnName(BLOB_NAME);  // Set your blob name for the model here
-    bool syncNN, subpixel, spatial_camera;
+    bool syncNN, subpixel;
     int confidence = 200, LRchecktresh = 5;
     std::string monoResolution = "400p";
 
@@ -179,7 +144,6 @@ int main(int argc, char** argv) {
     node->get_parameter("LRchecktresh", LRchecktresh);
     node->get_parameter("monoResolution", monoResolution);
     node->get_parameter("resourceBaseFolder", resourceBaseFolder);
-    node->get_parameter("spatial_camera", spatial_camera);
 
     if(resourceBaseFolder.empty()) {
         throw std::runtime_error("Send the path to the resouce folder containing NNBlob in \'resourceBaseFolder\' ");
@@ -192,7 +156,7 @@ int main(int argc, char** argv) {
     }
 
     nnPath = resourceBaseFolder + "/" + nnName;
-    dai::Pipeline pipeline = createPipeline(spatial_camera, syncNN, subpixel, nnPath, confidence, LRchecktresh, monoResolution);
+    dai::Pipeline pipeline = createPipeline(syncNN, subpixel, nnPath, confidence, LRchecktresh, monoResolution);
     dai::Device device(pipeline);
 
     auto colorQueue = device.getOutputQueue("preview", 30, false);
diff --git a/depthai_ros_driver/config/oak_d_sr.yaml b/depthai_ros_driver/config/oak_d_sr.yaml
new file mode 100644
index 00000000..e00474b2
--- /dev/null
+++ b/depthai_ros_driver/config/oak_d_sr.yaml
@@ -0,0 +1,6 @@
+/oak:
+  ros__parameters:
+    camera:
+      i_pipeline_type: 'Depth'
+    right:
+      i_publish_topic: true
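
Like the other files under depthai_ros_driver/config, this one is intended to
be passed to the driver through the params_file launch argument (the path
below is illustrative):

    ros2 launch depthai_ros_driver camera.launch.py camera_model:=OAK-D-SR \
        params_file:=/path/to/depthai_ros_driver/config/oak_d_sr.yaml
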
diff --git a/depthai_ros_driver/config/sr_rgbd.yaml b/depthai_ros_driver/config/sr_rgbd.yaml
new file mode 100644
index 00000000..4d5b7f6d
--- /dev/null
+++ b/depthai_ros_driver/config/sr_rgbd.yaml
@@ -0,0 +1,9 @@
+/oak:
+  ros__parameters:
+    camera:
+      i_nn_type: none
+      i_pipeline_type: Depth
+    right:
+      i_publish_topic: true
+    stereo:
+      i_extended_disp: true
\ No newline at end of file
diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/detection.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/detection.hpp
index d5898914..812a2908 100644
--- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/detection.hpp
+++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/detection.hpp
@@ -5,6 +5,7 @@
 #include <vector>
 
 #include "camera_info_manager/camera_info_manager.hpp"
+#include "depthai-shared/common/CameraBoardSocket.hpp"
 #include "depthai/device/DataQueue.hpp"
 #include "depthai/device/Device.hpp"
 #include "depthai/pipeline/Pipeline.hpp"
@@ -16,6 +17,7 @@
 #include "depthai_ros_driver/dai_nodes/base_node.hpp"
 #include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp"
 #include "depthai_ros_driver/param_handlers/nn_param_handler.hpp"
+#include "depthai_ros_driver/utils.hpp"
 #include "image_transport/camera_publisher.hpp"
 #include "image_transport/image_transport.hpp"
 #include "rclcpp/node.hpp"
@@ -35,12 +37,16 @@ class Detection : public BaseNode {
      * @param      node         The node
      * @param      pipeline     The pipeline
      */
-    Detection(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr<dai::Pipeline> pipeline) : BaseNode(daiNodeName, node, pipeline) {
+    Detection(const std::string& daiNodeName,
+              rclcpp::Node* node,
+              std::shared_ptr<dai::Pipeline> pipeline,
+              const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A)
+        : BaseNode(daiNodeName, node, pipeline) {
         RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str());
         setNames();
         detectionNode = pipeline->create<T>();
         imageManip = pipeline->create<dai::node::ImageManip>();
-        ph = std::make_unique<param_handlers::NNParamHandler>(node, daiNodeName);
+        ph = std::make_unique<param_handlers::NNParamHandler>(node, daiNodeName, socket);
         ph->declareParams(detectionNode, imageManip);
         RCLCPP_DEBUG(node->get_logger(), "Node %s created", daiNodeName.c_str());
         imageManip->out.link(detectionNode->input);
@@ -55,12 +61,13 @@ class Detection : public BaseNode {
      */
     void setupQueues(std::shared_ptr<dai::Device> device) override {
         nnQ = device->getOutputQueue(nnQName, ph->getParam<int>("i_max_q_size"), false);
-        auto tfPrefix = getTFPrefix("rgb");
+        std::string socketName = utils::getSocketName(static_cast<dai::CameraBoardSocket>(ph->getParam<int>("i_board_socket_id")));
+        auto tfPrefix = getTFPrefix(socketName);
         int width;
         int height;
         if(ph->getParam<bool>("i_disable_resize")) {
-            width = getROSNode()->get_parameter("rgb.i_preview_size").as_int();
-            height = getROSNode()->get_parameter("rgb.i_preview_size").as_int();
+            width = ph->getOtherNodeParam<int>(socketName, "i_preview_width");
+            height = ph->getOtherNodeParam<int>(socketName, "i_preview_height");
         } else {
             width = imageManip->initialConfig.getResizeConfig().width;
             height = imageManip->initialConfig.getResizeConfig().height;
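
The TF prefix and preview size are now resolved from the socket the detection
network is attached to, rather than being hard-coded to "rgb". A sketch of
steering this from a params file, assuming the node is named "nn" and that
i_board_socket_id takes the numeric dai::CameraBoardSocket value (CAM_A = 0,
CAM_B = 1, CAM_C = 2):

    /oak:
      ros__parameters:
        nn:
          i_board_socket_id: 2
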
diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/nn_wrapper.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/nn_wrapper.hpp
index 4b82374b..6890ab38 100644
--- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/nn_wrapper.hpp
+++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/nn_wrapper.hpp
@@ -4,6 +4,7 @@
 #include <string>
 #include <vector>
 
+#include "depthai-shared/common/CameraBoardSocket.hpp"
 #include "depthai_ros_driver/dai_nodes/base_node.hpp"
 
 namespace dai {
@@ -25,7 +26,10 @@ namespace dai_nodes {
 
 class NNWrapper : public BaseNode {
    public:
-    explicit NNWrapper(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr<dai::Pipeline> pipeline);
+    explicit NNWrapper(const std::string& daiNodeName,
+                       rclcpp::Node* node,
+                       std::shared_ptr<dai::Pipeline> pipeline,
+                       const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A);
     ~NNWrapper();
     void updateParams(const std::vector<rclcpp::Parameter>& params) override;
     void setupQueues(std::shared_ptr<dai::Device> device) override;
diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/segmentation.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/segmentation.hpp
index c59acca1..97127b9b 100644
--- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/segmentation.hpp
+++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/segmentation.hpp
@@ -5,6 +5,7 @@
 #include <vector>
 
 #include "cv_bridge/cv_bridge.hpp"
+#include "depthai-shared/common/CameraBoardSocket.hpp"
 #include "depthai_ros_driver/dai_nodes/base_node.hpp"
 #include "image_transport/camera_publisher.hpp"
 #include "image_transport/image_transport.hpp"
@@ -40,7 +41,10 @@ namespace dai_nodes {
 namespace nn {
 class Segmentation : public BaseNode {
    public:
-    Segmentation(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr<dai::Pipeline> pipeline);
+    Segmentation(const std::string& daiNodeName,
+                 rclcpp::Node* node,
+                 std::shared_ptr<dai::Pipeline> pipeline,
+                 const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A);
     ~Segmentation();
     void updateParams(const std::vector<rclcpp::Parameter>& params) override;
     void setupQueues(std::shared_ptr<dai::Device> device) override;
diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_detection.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_detection.hpp
index ea82cece..18f9bf62 100644
--- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_detection.hpp
+++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_detection.hpp
@@ -5,6 +5,7 @@
 #include <vector>
 
 #include "camera_info_manager/camera_info_manager.hpp"
+#include "depthai-shared/common/CameraBoardSocket.hpp"
 #include "depthai/device/DataQueue.hpp"
 #include "depthai/device/Device.hpp"
 #include "depthai/pipeline/Pipeline.hpp"
@@ -17,6 +18,7 @@
 #include "depthai_ros_driver/dai_nodes/nn/nn_helpers.hpp"
 #include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp"
 #include "depthai_ros_driver/param_handlers/nn_param_handler.hpp"
+#include "depthai_ros_driver/utils.hpp"
 #include "image_transport/camera_publisher.hpp"
 #include "image_transport/image_transport.hpp"
 #include "rclcpp/node.hpp"
@@ -27,12 +29,16 @@ namespace nn {
 template <typename T>
 class SpatialDetection : public BaseNode {
    public:
-    SpatialDetection(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr<dai::Pipeline> pipeline) : BaseNode(daiNodeName, node, pipeline) {
+    SpatialDetection(const std::string& daiNodeName,
+                     rclcpp::Node* node,
+                     std::shared_ptr<dai::Pipeline> pipeline,
+                     const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A)
+        : BaseNode(daiNodeName, node, pipeline) {
         RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str());
         setNames();
         spatialNode = pipeline->create<T>();
         imageManip = pipeline->create<dai::node::ImageManip>();
-        ph = std::make_unique<param_handlers::NNParamHandler>(node, daiNodeName);
+        ph = std::make_unique<param_handlers::NNParamHandler>(node, daiNodeName, socket);
         ph->declareParams(spatialNode, imageManip);
         RCLCPP_DEBUG(node->get_logger(), "Node %s created", daiNodeName.c_str());
         imageManip->out.link(spatialNode->input);
@@ -44,12 +50,13 @@ class SpatialDetection : public BaseNode {
     };
     void setupQueues(std::shared_ptr<dai::Device> device) override {
         nnQ = device->getOutputQueue(nnQName, ph->getParam<int>("i_max_q_size"), false);
-        auto tfPrefix = getTFPrefix("rgb");
+        std::string socketName = utils::getSocketName(static_cast<dai::CameraBoardSocket>(ph->getParam<int>("i_board_socket_id")));
+        auto tfPrefix = getTFPrefix(socketName);
         int width;
         int height;
         if(ph->getParam<bool>("i_disable_resize")) {
-            width = getROSNode()->get_parameter("rgb.i_preview_size").as_int();
-            height = getROSNode()->get_parameter("rgb.i_preview_size").as_int();
+            width = ph->getOtherNodeParam<int>(socketName, "i_preview_width");
+            height = ph->getOtherNodeParam<int>(socketName, "i_preview_height");
         } else {
             width = imageManip->initialConfig.getResizeConfig().width;
             height = imageManip->initialConfig.getResizeConfig().height;
@@ -80,8 +87,8 @@ class SpatialDetection : public BaseNode {
         }
 
         if(ph->getParam<bool>("i_enable_passthrough_depth")) {
-            dai::CameraBoardSocket socket = static_cast<dai::CameraBoardSocket>(getROSNode()->get_parameter("stereo.i_board_socket_id").as_int());
-            if(!getROSNode()->get_parameter("stereo.i_align_depth").as_bool()) {
+            dai::CameraBoardSocket socket = static_cast<dai::CameraBoardSocket>(ph->getOtherNodeParam<int>("stereo", "i_board_socket_id"));
+            if(!ph->getOtherNodeParam<bool>("stereo", "i_align_depth")) {
                 tfPrefix = getTFPrefix("right");
             };
             ptDepthQ = device->getOutputQueue(ptDepthQName, ph->getParam<int>("i_max_q_size"), false);
@@ -93,8 +100,8 @@ class SpatialDetection : public BaseNode {
                                                                        *ptDepthImageConverter,
                                                                        device,
                                                                        socket,
-                                                                       getROSNode()->get_parameter("stereo.i_width").as_int(),
-                                                                       getROSNode()->get_parameter("stereo.i_height").as_int()));
+                                                                       ph->getOtherNodeParam<int>("stereo", "i_width"),
+                                                                       ph->getOtherNodeParam<int>("stereo", "i_height")));
 
             ptDepthPub = image_transport::create_camera_publisher(getROSNode(), "~/" + getName() + "/passthrough_depth/image_raw");
             ptDepthQ->addCallback(
diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_nn_wrapper.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_nn_wrapper.hpp
index 8c9d19ac..dc27c09c 100644
--- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_nn_wrapper.hpp
+++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_nn_wrapper.hpp
@@ -4,6 +4,7 @@
 #include <string>
 #include <vector>
 
+#include "depthai-shared/common/CameraBoardSocket.hpp"
 #include "depthai_ros_driver/dai_nodes/base_node.hpp"
 
 namespace dai {
@@ -26,7 +27,10 @@ namespace dai_nodes {
 
 class SpatialNNWrapper : public BaseNode {
    public:
-    explicit SpatialNNWrapper(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr<dai::Pipeline> pipeline);
+    explicit SpatialNNWrapper(const std::string& daiNodeName,
+                              rclcpp::Node* node,
+                              std::shared_ptr<dai::Pipeline> pipeline,
+                              const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A);
     ~SpatialNNWrapper();
     void updateParams(const std::vector<rclcpp::Parameter>& params) override;
     void setupQueues(std::shared_ptr<dai::Device> device) override;
diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sensor_wrapper.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sensor_wrapper.hpp
index 080c5700..792e763b 100644
--- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sensor_wrapper.hpp
+++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sensor_wrapper.hpp
@@ -53,7 +53,7 @@ class SensorWrapper : public BaseNode {
 
    private:
     void subCB(const sensor_msgs::msg::Image& img);
-    std::unique_ptr<BaseNode> sensorNode, featureTrackerNode;
+    std::unique_ptr<BaseNode> sensorNode, featureTrackerNode, nnNode;
     std::unique_ptr<param_handlers::SensorParamHandler> ph;
     std::unique_ptr<dai::ros::ImageConverter> converter;
     rclcpp::Subscription<sensor_msgs::msg::Image>::SharedPtr sub;
diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/stereo.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/stereo.hpp
index bcc09024..f159cc4a 100644
--- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/stereo.hpp
+++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/stereo.hpp
@@ -93,7 +93,7 @@ class Stereo : public BaseNode {
     std::shared_ptr<dai::node::VideoEncoder> stereoEnc, leftRectEnc, rightRectEnc;
     std::unique_ptr<SensorWrapper> left;
     std::unique_ptr<SensorWrapper> right;
-    std::unique_ptr<BaseNode> featureTrackerLeftR, featureTrackerRightR;
+    std::unique_ptr<BaseNode> featureTrackerLeftR, featureTrackerRightR, nnNode;
     std::unique_ptr<param_handlers::StereoParamHandler> ph;
     std::shared_ptr<dai::DataOutputQueue> stereoQ, leftRectQ, rightRectQ;
     std::shared_ptr<dai::node::XLinkOut> xoutStereo, xoutLeftRect, xoutRightRect;
diff --git a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/imu_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/imu_param_handler.hpp
index 0045c0b4..d6836fc6 100644
--- a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/imu_param_handler.hpp
+++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/imu_param_handler.hpp
@@ -4,6 +4,7 @@
 #include <string>
 #include <vector>
 
+#include "depthai-shared/properties/IMUProperties.hpp"
 #include "depthai/pipeline/datatype/CameraControl.hpp"
 #include "depthai-shared/properties/IMUProperties.hpp"
 #include "depthai_bridge/ImuConverter.hpp"
diff --git a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/nn_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/nn_param_handler.hpp
index 2fc65c79..a4fc36b6 100644
--- a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/nn_param_handler.hpp
+++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/nn_param_handler.hpp
@@ -6,6 +6,7 @@
 #include <unordered_map>
 #include <vector>
 
+#include "depthai-shared/common/CameraBoardSocket.hpp"
 #include "depthai/pipeline/datatype/CameraControl.hpp"
 #include "depthai_ros_driver/param_handlers/base_param_handler.hpp"
 #include "nlohmann/json.hpp"
@@ -33,7 +34,7 @@ enum class NNFamily { Segmentation, Mobilenet, Yolo };
 }
 class NNParamHandler : public BaseParamHandler {
    public:
-    explicit NNParamHandler(rclcpp::Node* node, const std::string& name);
+    explicit NNParamHandler(rclcpp::Node* node, const std::string& name, const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A);
     ~NNParamHandler();
     nn::NNFamily getNNFamily();
     template <typename T>
diff --git a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/stereo_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/stereo_param_handler.hpp
index c45c6595..f2546e70 100644
--- a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/stereo_param_handler.hpp
+++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/stereo_param_handler.hpp
@@ -26,13 +26,14 @@ class StereoParamHandler : public BaseParamHandler {
     ~StereoParamHandler();
     void declareParams(std::shared_ptr<dai::node::StereoDepth> stereo);
     dai::CameraControl setRuntimeParams(const std::vector<rclcpp::Parameter>& params) override;
-    void updateSocketsFromParams(dai::CameraBoardSocket& left, dai::CameraBoardSocket& right);
+    void updateSocketsFromParams(dai::CameraBoardSocket& left, dai::CameraBoardSocket& right, dai::CameraBoardSocket& align);
 
    private:
     std::unordered_map<std::string, dai::node::StereoDepth::PresetMode> depthPresetMap;
     std::unordered_map<std::string, dai::StereoDepthConfig::CostMatching::DisparityWidth> disparityWidthMap;
     std::unordered_map<std::string, dai::StereoDepthConfig::PostProcessing::DecimationFilter::DecimationMode> decimationModeMap;
     std::unordered_map<std::string, dai::StereoDepthConfig::PostProcessing::TemporalFilter::PersistencyMode> temporalPersistencyMap;
+    dai::CameraBoardSocket alignSocket;
 };
 }  // namespace param_handlers
 }  // namespace depthai_ros_driver
\ No newline at end of file
diff --git a/depthai_ros_driver/launch/camera.launch.py b/depthai_ros_driver/launch/camera.launch.py
index 9e743c02..c152ea8d 100644
--- a/depthai_ros_driver/launch/camera.launch.py
+++ b/depthai_ros_driver/launch/camera.launch.py
@@ -33,11 +33,11 @@ def launch_setup(context, *args, **kwargs):
     cam_yaw      = LaunchConfiguration('cam_yaw',       default = '0.0')
     use_composition = LaunchConfiguration('rsp_use_composition', default='true')
     imu_from_descr = LaunchConfiguration('imu_from_descr', default='false')
-    pass_tf_args_as_params = LaunchConfiguration('pass_tf_args_as_params', default='false')
+    publish_tf_from_calibration = LaunchConfiguration('publish_tf_from_calibration', default='false')
     override_cam_model = LaunchConfiguration('override_cam_model', default='false')
 
     tf_params = {}
-    if(pass_tf_args_as_params.perform(context) == 'true'):
+    if(publish_tf_from_calibration.perform(context) == 'true'):
         cam_model = ''
         if override_cam_model.perform(context) == 'true':
             cam_model = camera_model.perform(context)
@@ -92,7 +92,7 @@ def launch_setup(context, *args, **kwargs):
                               'cam_pitch': cam_pitch,
                               'cam_yaw': cam_yaw,
                               'use_composition': use_composition,
-                              'use_base_descr': pass_tf_args_as_params}.items()),
+                              'use_base_descr': publish_tf_from_calibration}.items()),
 
         ComposableNodeContainer(
             name=name+"_container",
@@ -132,7 +132,7 @@ def generate_launch_description():
         DeclareLaunchArgument("use_rviz", default_value='false'),
         DeclareLaunchArgument("rviz_config", default_value=os.path.join(depthai_prefix, "config", "rviz", "rgbd.rviz")),
         DeclareLaunchArgument("rsp_use_composition", default_value='true'),
-        DeclareLaunchArgument("pass_tf_args_as_params", default_value='false', description='Enables TF publishing from camera calibration file.'),
+        DeclareLaunchArgument("publish_tf_from_calibration", default_value='false', description='Enables TF publishing from camera calibration file.'),
         DeclareLaunchArgument("imu_from_descr", default_value='false', description='Enables IMU publishing from URDF.'),
         DeclareLaunchArgument("override_cam_model", default_value='false', description='Overrides camera model from calibration file.'),
         DeclareLaunchArgument("use_gdb", default_value='false'),
diff --git a/depthai_ros_driver/launch/rgbd_pcl.launch.py b/depthai_ros_driver/launch/rgbd_pcl.launch.py
index 6b50fa04..6d8b17c6 100644
--- a/depthai_ros_driver/launch/rgbd_pcl.launch.py
+++ b/depthai_ros_driver/launch/rgbd_pcl.launch.py
@@ -71,6 +71,7 @@ def generate_launch_description():
     depthai_prefix = get_package_share_directory("depthai_ros_driver")
     declared_arguments = [
         DeclareLaunchArgument("name", default_value="oak"),
+        DeclareLaunchArgument("camera_model", default_value="OAK-D"),
         DeclareLaunchArgument("parent_frame", default_value="oak-d-base-frame"),
         DeclareLaunchArgument("cam_pos_x", default_value="0.0"),
         DeclareLaunchArgument("cam_pos_y", default_value="0.0"),
diff --git a/depthai_ros_driver/launch/sr_rgbd_pcl.launch.py b/depthai_ros_driver/launch/sr_rgbd_pcl.launch.py
new file mode 100644
index 00000000..32c0035a
--- /dev/null
+++ b/depthai_ros_driver/launch/sr_rgbd_pcl.launch.py
@@ -0,0 +1,89 @@
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, OpaqueFunction
+from launch.launch_description_sources import PythonLaunchDescriptionSource
+from launch.substitutions import LaunchConfiguration
+from launch_ros.actions import LoadComposableNodes, Node
+from launch_ros.descriptions import ComposableNode
+from launch.conditions import IfCondition
+
+
+def launch_setup(context, *args, **kwargs):
+    params_file = LaunchConfiguration("params_file")
+    depthai_prefix = get_package_share_directory("depthai_ros_driver")
+
+    name = LaunchConfiguration('name').perform(context)
+    rgb_topic_name = name + '/right/image_raw'
+    if LaunchConfiguration('rectify_rgb').perform(context) == 'true':
+        rgb_topic_name = name + '/right/image_rect'
+    return [
+        IncludeLaunchDescription(
+            PythonLaunchDescriptionSource(
+                os.path.join(depthai_prefix, 'launch', 'camera.launch.py')),
+            launch_arguments={"name": name,
+                              "params_file": params_file,
+                              "parent_frame": LaunchConfiguration("parent_frame"),
+                              "cam_pos_x": LaunchConfiguration("cam_pos_x"),
+                              "cam_pos_y": LaunchConfiguration("cam_pos_y"),
+                              "cam_pos_z": LaunchConfiguration("cam_pos_z"),
+                              "cam_roll": LaunchConfiguration("cam_roll"),
+                              "cam_pitch": LaunchConfiguration("cam_pitch"),
+                              "cam_yaw": LaunchConfiguration("cam_yaw"),
+                              "use_rviz": LaunchConfiguration("use_rviz")
+                              }.items()),
+
+        LoadComposableNodes(
+            condition=IfCondition(LaunchConfiguration("rectify_rgb")),
+            target_container=name+"_container",
+            composable_node_descriptions=[
+                    ComposableNode(
+                        package="image_proc",
+                        plugin="image_proc::RectifyNode",
+                        name="rectify_color_node",
+                        remappings=[('image', name+'/right/image_raw'),
+                                    ('camera_info', name+'/right/camera_info'),
+                                    ('image_rect', name+'/right/image_rect'),
+                                    ('image_rect/compressed', name+'/right/image_rect/compressed'),
+                                    ('image_rect/compressedDepth', name+'/right/image_rect/compressedDepth'),
+                                    ('image_rect/theora', name+'/right/image_rect/theora')]
+                    )
+            ]),
+        LoadComposableNodes(
+            target_container=name+"_container",
+            composable_node_descriptions=[
+                    ComposableNode(
+                    package='depth_image_proc',
+                    plugin='depth_image_proc::PointCloudXyzrgbNode',
+                    name='point_cloud_xyzrgb_node',
+                    remappings=[('depth_registered/image_rect', name+'/stereo/image_raw'),
+                                ('rgb/image_rect_color', rgb_topic_name),
+                                ('rgb/camera_info', name+'/right/camera_info'),
+                                ('points', name+'/points')]
+                    ),
+            ],
+        ),
+    ]
+
+
+def generate_launch_description():
+    depthai_prefix = get_package_share_directory("depthai_ros_driver")
+    declared_arguments = [
+        DeclareLaunchArgument("name", default_value="oak"),
+        DeclareLaunchArgument("camera_model", default_value="OAK-D-SR"),
+        DeclareLaunchArgument("parent_frame", default_value="oak-d-base-frame"),
+        DeclareLaunchArgument("cam_pos_x", default_value="0.0"),
+        DeclareLaunchArgument("cam_pos_y", default_value="0.0"),
+        DeclareLaunchArgument("cam_pos_z", default_value="0.0"),
+        DeclareLaunchArgument("cam_roll", default_value="0.0"),
+        DeclareLaunchArgument("cam_pitch", default_value="0.0"),
+        DeclareLaunchArgument("cam_yaw", default_value="0.0"),
+        DeclareLaunchArgument("params_file", default_value=os.path.join(depthai_prefix, 'config', 'sr_rgbd.yaml')),
+        DeclareLaunchArgument("use_rviz", default_value="False"),
+        DeclareLaunchArgument("rectify_rgb", default_value="False"),
+    ]
+
+    return LaunchDescription(
+        declared_arguments + [OpaqueFunction(function=launch_setup)]
+    )
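+
+# Example invocation (hypothetical; substitute the name this file is installed under):
+#   ros2 launch depthai_ros_driver <this_launch_file>.launch.py rectify_rgb:=true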
diff --git a/depthai_ros_driver/src/camera.cpp b/depthai_ros_driver/src/camera.cpp
index b9f14844..3e690a7b 100644
--- a/depthai_ros_driver/src/camera.cpp
+++ b/depthai_ros_driver/src/camera.cpp
@@ -62,7 +62,9 @@ void Camera::diagCB(const diagnostic_msgs::msg::DiagnosticArray::SharedPtr msg)
         if(status.name == get_name() + std::string(": sys_logger")) {
             if(status.level == diagnostic_msgs::msg::DiagnosticStatus::ERROR) {
                 RCLCPP_ERROR(this->get_logger(), "Camera diagnostics error: %s", status.message.c_str());
-                restart();
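+                // gate automatic restarts behind the i_restart_on_diagnostics_error parameter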
+                if(ph->getParam<bool>("i_restart_on_diagnostics_error")) {
+                    restart();
+                }
             }
         }
     }
diff --git a/depthai_ros_driver/src/dai_nodes/nn/nn_wrapper.cpp b/depthai_ros_driver/src/dai_nodes/nn/nn_wrapper.cpp
index 8d23d2a7..0f7561e4 100644
--- a/depthai_ros_driver/src/dai_nodes/nn/nn_wrapper.cpp
+++ b/depthai_ros_driver/src/dai_nodes/nn/nn_wrapper.cpp
@@ -10,9 +10,10 @@
 
 namespace depthai_ros_driver {
 namespace dai_nodes {
-NNWrapper::NNWrapper(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr<dai::Pipeline> pipeline) : BaseNode(daiNodeName, node, pipeline) {
+NNWrapper::NNWrapper(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr<dai::Pipeline> pipeline, const dai::CameraBoardSocket& socket)
+    : BaseNode(daiNodeName, node, pipeline) {
     RCLCPP_DEBUG(node->get_logger(), "Creating node %s base", daiNodeName.c_str());
-    ph = std::make_unique<param_handlers::NNParamHandler>(node, daiNodeName);
+    ph = std::make_unique<param_handlers::NNParamHandler>(node, daiNodeName, socket);
     auto family = ph->getNNFamily();
     switch(family) {
         case param_handlers::nn::NNFamily::Yolo: {
diff --git a/depthai_ros_driver/src/dai_nodes/nn/segmentation.cpp b/depthai_ros_driver/src/dai_nodes/nn/segmentation.cpp
index 90872dbd..627ad259 100644
--- a/depthai_ros_driver/src/dai_nodes/nn/segmentation.cpp
+++ b/depthai_ros_driver/src/dai_nodes/nn/segmentation.cpp
@@ -12,6 +12,7 @@
 #include "depthai_bridge/ImageConverter.hpp"
 #include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp"
 #include "depthai_ros_driver/param_handlers/nn_param_handler.hpp"
+#include "depthai_ros_driver/utils.hpp"
 #include "image_transport/camera_publisher.hpp"
 #include "image_transport/image_transport.hpp"
 #include "rclcpp/node.hpp"
@@ -22,13 +23,13 @@ namespace depthai_ros_driver {
 namespace dai_nodes {
 namespace nn {
 
-Segmentation::Segmentation(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr<dai::Pipeline> pipeline)
+Segmentation::Segmentation(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr<dai::Pipeline> pipeline, const dai::CameraBoardSocket& socket)
     : BaseNode(daiNodeName, node, pipeline) {
     RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str());
     setNames();
     segNode = pipeline->create<dai::node::NeuralNetwork>();
     imageManip = pipeline->create<dai::node::ImageManip>();
-    ph = std::make_unique<param_handlers::NNParamHandler>(node, daiNodeName);
+    ph = std::make_unique<param_handlers::NNParamHandler>(node, daiNodeName, socket);
     ph->declareParams(segNode, imageManip);
     RCLCPP_DEBUG(node->get_logger(), "Node %s created", daiNodeName.c_str());
     imageManip->out.link(segNode->input);
@@ -58,7 +59,7 @@ void Segmentation::setupQueues(std::shared_ptr<dai::Device> device) {
     nnPub = image_transport::create_camera_publisher(getROSNode(), "~/" + getName() + "/image_raw");
     nnQ->addCallback(std::bind(&Segmentation::segmentationCB, this, std::placeholders::_1, std::placeholders::_2));
     if(ph->getParam<bool>("i_enable_passthrough")) {
-        auto tfPrefix = getTFPrefix("rgb");
+        auto tfPrefix = getTFPrefix(utils::getSocketName(static_cast<dai::CameraBoardSocket>(ph->getParam<int>("i_board_socket_id"))));
         ptQ = device->getOutputQueue(ptQName, ph->getParam<int>("i_max_q_size"), false);
         imageConverter = std::make_unique<dai::ros::ImageConverter>(tfPrefix + "_camera_optical_frame", false);
         infoManager = std::make_shared<camera_info_manager::CameraInfoManager>(
diff --git a/depthai_ros_driver/src/dai_nodes/nn/spatial_nn_wrapper.cpp b/depthai_ros_driver/src/dai_nodes/nn/spatial_nn_wrapper.cpp
index d54a8aec..938cd6fa 100644
--- a/depthai_ros_driver/src/dai_nodes/nn/spatial_nn_wrapper.cpp
+++ b/depthai_ros_driver/src/dai_nodes/nn/spatial_nn_wrapper.cpp
@@ -9,10 +9,13 @@
 
 namespace depthai_ros_driver {
 namespace dai_nodes {
-SpatialNNWrapper::SpatialNNWrapper(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr<dai::Pipeline> pipeline)
+SpatialNNWrapper::SpatialNNWrapper(const std::string& daiNodeName,
+                                   rclcpp::Node* node,
+                                   std::shared_ptr<dai::Pipeline> pipeline,
+                                   const dai::CameraBoardSocket& socket)
     : BaseNode(daiNodeName, node, pipeline) {
     RCLCPP_DEBUG(node->get_logger(), "Creating node %s base", daiNodeName.c_str());
-    ph = std::make_unique<param_handlers::NNParamHandler>(node, daiNodeName);
+    ph = std::make_unique<param_handlers::NNParamHandler>(node, daiNodeName, socket);
     auto family = ph->getNNFamily();
     switch(family) {
         case param_handlers::nn::NNFamily::Yolo: {
diff --git a/depthai_ros_driver/src/dai_nodes/sensors/sensor_helpers.cpp b/depthai_ros_driver/src/dai_nodes/sensors/sensor_helpers.cpp
index 076a8419..59e9f8b0 100644
--- a/depthai_ros_driver/src/dai_nodes/sensors/sensor_helpers.cpp
+++ b/depthai_ros_driver/src/dai_nodes/sensors/sensor_helpers.cpp
@@ -125,7 +125,7 @@ sensor_msgs::msg::CameraInfo getCalibInfo(const rclcpp::Logger& logger,
     try {
         info = converter.calibrationToCameraInfo(calibHandler, socket, width, height);
     } catch(std::runtime_error& e) {
-        RCLCPP_ERROR(logger, "No calibration! Publishing empty camera_info.");
+        RCLCPP_ERROR(logger, "No calibration for socket %d! Publishing empty camera_info.", static_cast<int>(socket));
     }
     return info;
 }
diff --git a/depthai_ros_driver/src/dai_nodes/sensors/sensor_wrapper.cpp b/depthai_ros_driver/src/dai_nodes/sensors/sensor_wrapper.cpp
index 4f748b8f..da0bd819 100644
--- a/depthai_ros_driver/src/dai_nodes/sensors/sensor_wrapper.cpp
+++ b/depthai_ros_driver/src/dai_nodes/sensors/sensor_wrapper.cpp
@@ -4,6 +4,7 @@
 #include "depthai/pipeline/Pipeline.hpp"
 #include "depthai/pipeline/node/XLinkIn.hpp"
 #include "depthai_bridge/ImageConverter.hpp"
+#include "depthai_ros_driver/dai_nodes/nn/nn_wrapper.hpp"
 #include "depthai_ros_driver/dai_nodes/sensors/feature_tracker.hpp"
 #include "depthai_ros_driver/dai_nodes/sensors/mono.hpp"
 #include "depthai_ros_driver/dai_nodes/sensors/rgb.hpp"
@@ -53,6 +54,9 @@ SensorWrapper::SensorWrapper(const std::string& daiNodeName,
         }
         RCLCPP_DEBUG(node->get_logger(), "Node %s has sensor %s", daiNodeName.c_str(), sensorName.c_str());
         sensorData = *sensorIt;
+        if(device->getDeviceName() == "OAK-D-SR") {
+            sensorIt->color = true;  // on the OAK-D-SR the OV9282 is a color sensor
+        }
         if((*sensorIt).color) {
             sensorNode = std::make_unique<RGB>(daiNodeName, node, pipeline, socket, (*sensorIt), publish);
         } else {
@@ -63,6 +67,10 @@ SensorWrapper::SensorWrapper(const std::string& daiNodeName,
         featureTrackerNode = std::make_unique<FeatureTracker>(daiNodeName + std::string("_feature_tracker"), node, pipeline);
         sensorNode->link(featureTrackerNode->getInput());
     }
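+    // optionally attach an NN node fed from this sensor's preview output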
+    if(ph->getParam<bool>("i_enable_nn")) {
+        nnNode = std::make_unique<NNWrapper>(daiNodeName + std::string("_nn"), node, pipeline, static_cast<dai::CameraBoardSocket>(socketID));
+        sensorNode->link(nnNode->getInput(), static_cast<int>(link_types::RGBLinkType::preview));
+    }
     RCLCPP_DEBUG(node->get_logger(), "Base node %s created", daiNodeName.c_str());
 }
 SensorWrapper::~SensorWrapper() = default;
@@ -96,6 +104,9 @@ void SensorWrapper::setupQueues(std::shared_ptr<dai::Device> device) {
     if(ph->getParam<bool>("i_enable_feature_tracker")) {
         featureTrackerNode->setupQueues(device);
     }
+    if(ph->getParam<bool>("i_enable_nn")) {
+        nnNode->setupQueues(device);
+    }
 }
 void SensorWrapper::closeQueues() {
     if(ph->getParam<bool>("i_simulate_from_topic")) {
@@ -107,6 +118,9 @@ void SensorWrapper::closeQueues() {
     if(ph->getParam<bool>("i_enable_feature_tracker")) {
         featureTrackerNode->closeQueues();
     }
+    if(ph->getParam<bool>("i_enable_nn")) {
+        nnNode->closeQueues();
+    }
 }
 
 void SensorWrapper::link(dai::Node::Input in, int linkType) {
diff --git a/depthai_ros_driver/src/dai_nodes/stereo.cpp b/depthai_ros_driver/src/dai_nodes/stereo.cpp
index 40e1de04..47b4fe66 100644
--- a/depthai_ros_driver/src/dai_nodes/stereo.cpp
+++ b/depthai_ros_driver/src/dai_nodes/stereo.cpp
@@ -9,6 +9,8 @@
 #include "depthai/pipeline/node/VideoEncoder.hpp"
 #include "depthai/pipeline/node/XLinkOut.hpp"
 #include "depthai_bridge/ImageConverter.hpp"
+#include "depthai_ros_driver/dai_nodes/nn/nn_helpers.hpp"
+#include "depthai_ros_driver/dai_nodes/nn/spatial_nn_wrapper.hpp"
 #include "depthai_ros_driver/dai_nodes/sensors/feature_tracker.hpp"
 #include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp"
 #include "depthai_ros_driver/dai_nodes/sensors/sensor_wrapper.hpp"
@@ -30,7 +32,11 @@ Stereo::Stereo(const std::string& daiNodeName,
     RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str());
     setNames();
     ph = std::make_unique<param_handlers::StereoParamHandler>(node, daiNodeName);
-    ph->updateSocketsFromParams(leftSocket, rightSocket);
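+    // the OAK-D-SR has no camera on CAM_A, so align depth to CAM_C (right) on that model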
+    auto alignSocket = dai::CameraBoardSocket::CAM_A;
+    if(device->getDeviceName() == "OAK-D-SR") {
+        alignSocket = dai::CameraBoardSocket::CAM_C;
+    }
+    ph->updateSocketsFromParams(leftSocket, rightSocket, alignSocket);
     auto features = device->getConnectedCameraFeatures();
     for(auto f : features) {
         if(f.socket == leftSocket) {
@@ -52,6 +58,20 @@ Stereo::Stereo(const std::string& daiNodeName,
     setXinXout(pipeline);
     left->link(stereoCamNode->left);
     right->link(stereoCamNode->right);
+
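+    // optionally attach a spatial NN to the left or right sensor's preview output and feed it the depth stream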
+    if(ph->getParam<bool>("i_enable_spatial_nn")) {
+        if(ph->getParam<std::string>("i_spatial_nn_source") == "left") {
+            nnNode = std::make_unique<SpatialNNWrapper>(getName() + "_spatial_nn", getROSNode(), pipeline, leftSensInfo.socket);
+            left->link(nnNode->getInput(static_cast<int>(dai_nodes::nn_helpers::link_types::SpatialNNLinkType::input)),
+                       static_cast<int>(dai_nodes::link_types::RGBLinkType::preview));
+        } else {
+            nnNode = std::make_unique<SpatialNNWrapper>(getName() + "_spatial_nn", getROSNode(), pipeline, rightSensInfo.socket);
+            right->link(nnNode->getInput(static_cast<int>(dai_nodes::nn_helpers::link_types::SpatialNNLinkType::input)),
+                        static_cast<int>(dai_nodes::link_types::RGBLinkType::preview));
+        }
+        stereoCamNode->depth.link(nnNode->getInput(static_cast<int>(dai_nodes::nn_helpers::link_types::SpatialNNLinkType::inputDepth)));
+    }
+
     RCLCPP_DEBUG(node->get_logger(), "Node %s created", daiNodeName.c_str());
 }
 Stereo::~Stereo() = default;
@@ -329,6 +349,9 @@ void Stereo::setupQueues(std::shared_ptr<dai::Device> device) {
     if(ph->getParam<bool>("i_right_rect_enable_feature_tracker")) {
         featureTrackerRightR->setupQueues(device);
     }
+    if(ph->getParam<bool>("i_enable_spatial_nn")) {
+        nnNode->setupQueues(device);
+    }
 }
 void Stereo::closeQueues() {
     left->closeQueues();
@@ -353,6 +376,9 @@ void Stereo::closeQueues() {
     if(ph->getParam<bool>("i_right_rect_enable_feature_tracker")) {
         featureTrackerRightR->closeQueues();
     }
+    if(ph->getParam<bool>("i_enable_spatial_nn")) {
+        nnNode->closeQueues();
+    }
 }
 
 void Stereo::link(dai::Node::Input in, int /*linkType*/) {
diff --git a/depthai_ros_driver/src/param_handlers/camera_param_handler.cpp b/depthai_ros_driver/src/param_handlers/camera_param_handler.cpp
index 459406f2..097b57f9 100644
--- a/depthai_ros_driver/src/param_handlers/camera_param_handler.cpp
+++ b/depthai_ros_driver/src/param_handlers/camera_param_handler.cpp
@@ -35,6 +35,7 @@ void CameraParamHandler::declareParams() {
     declareAndLogParam<std::string>("i_external_calibration_path", "");
     declareAndLogParam<int>("i_laser_dot_brightness", 800, getRangedIntDescriptor(0, 1200));
     declareAndLogParam<int>("i_floodlight_brightness", 0, getRangedIntDescriptor(0, 1500));
+    declareAndLogParam<bool>("i_restart_on_diagnostics_error", false);
 
     declareAndLogParam<bool>("i_publish_tf_from_calibration", false);
     declareAndLogParam<std::string>("i_tf_camera_name", getROSNode()->get_name());
diff --git a/depthai_ros_driver/src/param_handlers/nn_param_handler.cpp b/depthai_ros_driver/src/param_handlers/nn_param_handler.cpp
index 4645091d..97be2b69 100644
--- a/depthai_ros_driver/src/param_handlers/nn_param_handler.cpp
+++ b/depthai_ros_driver/src/param_handlers/nn_param_handler.cpp
@@ -3,6 +3,7 @@
 #include <fstream>
 
 #include "ament_index_cpp/get_package_share_directory.hpp"
+#include "depthai-shared/common/CameraBoardSocket.hpp"
 #include "depthai/pipeline/node/DetectionNetwork.hpp"
 #include "depthai/pipeline/node/ImageManip.hpp"
 #include "depthai/pipeline/node/NeuralNetwork.hpp"
@@ -15,12 +16,13 @@
 namespace depthai_ros_driver {
 namespace param_handlers {
 
-NNParamHandler::NNParamHandler(rclcpp::Node* node, const std::string& name) : BaseParamHandler(node, name) {
+NNParamHandler::NNParamHandler(rclcpp::Node* node, const std::string& name, const dai::CameraBoardSocket& socket) : BaseParamHandler(node, name) {
     nnFamilyMap = {
         {"segmentation", nn::NNFamily::Segmentation},
         {"mobilenet", nn::NNFamily::Mobilenet},
         {"YOLO", nn::NNFamily::Yolo},
     };
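+    // record which board socket the NN input comes from, e.g. to pick the matching TF frame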
+    declareAndLogParam<int>("i_board_socket_id", static_cast<int>(socket));
 }
 NNParamHandler::~NNParamHandler() = default;
 nn::NNFamily NNParamHandler::getNNFamily() {
diff --git a/depthai_ros_driver/src/param_handlers/sensor_param_handler.cpp b/depthai_ros_driver/src/param_handlers/sensor_param_handler.cpp
index 0109fec1..5b168899 100644
--- a/depthai_ros_driver/src/param_handlers/sensor_param_handler.cpp
+++ b/depthai_ros_driver/src/param_handlers/sensor_param_handler.cpp
@@ -28,6 +28,7 @@ void SensorParamHandler::declareCommonParams(dai::CameraBoardSocket socket) {
     socketID = static_cast<dai::CameraBoardSocket>(declareAndLogParam<int>("i_board_socket_id", static_cast<int>(socket), 0));
     declareAndLogParam<bool>("i_update_ros_base_time_on_ros_msg", false);
     declareAndLogParam<bool>("i_enable_feature_tracker", false);
+    declareAndLogParam<bool>("i_enable_nn", false);
     declareAndLogParam<bool>("i_enable_lazy_publisher", true);
     declareAndLogParam<bool>("i_add_exposure_offset", false);
     declareAndLogParam<int>("i_exposure_offset", 0);
@@ -99,9 +100,20 @@ void SensorParamHandler::declareParams(std::shared_ptr<dai::node::ColorCamera> c
     int height = colorCam->getResolutionHeight();
 
     colorCam->setInterleaved(declareAndLogParam<bool>("i_interleaved", false));
-    if(declareAndLogParam<bool>("i_set_isp_scale", true)) {
-        int num = declareAndLogParam<int>("i_isp_num", 2);
-        int den = declareAndLogParam<int>("i_isp_den", 3);
+
+    // enable ISP scaling by default only when the sensor's default resolution is 1080P or 1200P
+    bool setIspScale = sensor.defaultResolution == "1080P" || sensor.defaultResolution == "1200P";
+    if(declareAndLogParam<bool>("i_set_isp_scale", setIspScale)) {
+        int num = 2;
+        int den = 3;
+        if(sensor.defaultResolution == "1200P") {
+            den = 5;  // scale 1200P streams by 2/5 for improved performance
+        }
+        num = declareAndLogParam<int>("i_isp_num", num);
+        den = declareAndLogParam<int>("i_isp_den", den);
         width = (width * num + den - 1) / den;
         height = (height * num + den - 1) / den;
         colorCam->setIspScale(num, den);
diff --git a/depthai_ros_driver/src/param_handlers/stereo_param_handler.cpp b/depthai_ros_driver/src/param_handlers/stereo_param_handler.cpp
index 2ec0d1b2..f3ac9e7c 100644
--- a/depthai_ros_driver/src/param_handlers/stereo_param_handler.cpp
+++ b/depthai_ros_driver/src/param_handlers/stereo_param_handler.cpp
@@ -39,9 +39,10 @@ StereoParamHandler::StereoParamHandler(rclcpp::Node* node, const std::string& na
 
 StereoParamHandler::~StereoParamHandler() = default;
 
-void StereoParamHandler::updateSocketsFromParams(dai::CameraBoardSocket& left, dai::CameraBoardSocket& right) {
+void StereoParamHandler::updateSocketsFromParams(dai::CameraBoardSocket& left, dai::CameraBoardSocket& right, dai::CameraBoardSocket& align) {
     int newLeftS = declareAndLogParam<int>("i_left_socket_id", static_cast<int>(left));
     int newRightS = declareAndLogParam<int>("i_right_socket_id", static_cast<int>(right));
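+    // depth alignment target; overridable via the i_board_socket_id parameter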
+    alignSocket = static_cast<dai::CameraBoardSocket>(declareAndLogParam<int>("i_board_socket_id", static_cast<int>(align)));
     if(newLeftS != static_cast<int>(left) || newRightS != static_cast<int>(right)) {
         RCLCPP_WARN(getROSNode()->get_logger(), "Left or right socket changed, updating stereo node");
         RCLCPP_WARN(getROSNode()->get_logger(), "Old left socket: %d, new left socket: %d", static_cast<int>(left), newLeftS);
@@ -78,14 +79,15 @@ void StereoParamHandler::declareParams(std::shared_ptr<dai::node::StereoDepth> s
     declareAndLogParam<bool>("i_right_rect_enable_feature_tracker", false);
     declareAndLogParam<bool>("i_right_rect_add_exposure_offset", false);
     declareAndLogParam<int>("i_right_rect_exposure_offset", 0);
+    declareAndLogParam<bool>("i_enable_spatial_nn", false);
+    declareAndLogParam<std::string>("i_spatial_nn_source", "right");
 
     stereo->setLeftRightCheck(declareAndLogParam<bool>("i_lr_check", true));
     int width = 1280;
     int height = 720;
-    auto socket = static_cast<dai::CameraBoardSocket>(declareAndLogParam<int>("i_board_socket_id", static_cast<int>(dai::CameraBoardSocket::CAM_A)));
     std::string socketName;
     if(declareAndLogParam<bool>("i_align_depth", true)) {
-        socketName = utils::getSocketName(socket);
+        socketName = utils::getSocketName(alignSocket);
         try {
             width = getROSNode()->get_parameter(socketName + ".i_width").as_int();
             height = getROSNode()->get_parameter(socketName + ".i_height").as_int();
@@ -93,7 +95,7 @@ void StereoParamHandler::declareParams(std::shared_ptr<dai::node::StereoDepth> s
             RCLCPP_ERROR(getROSNode()->get_logger(), "%s parameters not set, defaulting to 1280x720 unless specified otherwise.", socketName.c_str());
         }
         declareAndLogParam<std::string>("i_socket_name", socketName);
-        stereo->setDepthAlign(socket);
+        stereo->setDepthAlign(alignSocket);
     }
 
     if(declareAndLogParam<bool>("i_set_input_size", false)) {
diff --git a/depthai_ros_driver/src/pipeline/pipeline_generator.cpp b/depthai_ros_driver/src/pipeline/pipeline_generator.cpp
index b26e6039..cc28adc1 100644
--- a/depthai_ros_driver/src/pipeline/pipeline_generator.cpp
+++ b/depthai_ros_driver/src/pipeline/pipeline_generator.cpp
@@ -55,12 +55,12 @@ std::string PipelineGenerator::validatePipeline(rclcpp::Node* node, const std::s
     auto pType = utils::getValFromMap(typeStr, pipelineTypeMap);
     if(sensorNum == 1) {
         if(pType != PipelineType::RGB) {
-            RCLCPP_ERROR(node->get_logger(), "Wrong pipeline chosen for camera as it has only one sensor. Switching to RGB.");
+            RCLCPP_ERROR(node->get_logger(), "Invalid pipeline chosen for camera as it has only one sensor. Switching to RGB.");
             return "RGB";
         }
     } else if(sensorNum == 2) {
         if(pType != PipelineType::Stereo && pType != PipelineType::Depth) {
-            RCLCPP_ERROR(node->get_logger(), "Wrong pipeline chosen for camera as it has only stereo pair. Switching to Depth.");
+            RCLCPP_ERROR(node->get_logger(), "Invalid pipeline chosen for camera as it has only a stereo pair. Switching to Depth.");
             return "DEPTH";
         }
     }