diff --git a/.github/workflows/main.workflow.yml b/.github/workflows/main.workflow.yml index 886db63c..126fde76 100644 --- a/.github/workflows/main.workflow.yml +++ b/.github/workflows/main.workflow.yml @@ -30,7 +30,7 @@ jobs: fail-fast: false steps: - uses: actions/checkout@v3.3.0 - - uses: ros-tooling/setup-ros@0.6.2 + - uses: ros-tooling/setup-ros@0.7.8 with: required-ros-distributions: ${{ env.ROS_DISTRO }} - uses: ros-tooling/action-ros2-lint@0.1.3 @@ -52,7 +52,7 @@ jobs: linter: [xmllint, pep257, lint_cmake] steps: - uses: actions/checkout@v3.3.0 - - uses: ros-tooling/setup-ros@0.6.2 + - uses: ros-tooling/setup-ros@0.7.8 with: required-ros-distributions: ${{ env.ROS_DISTRO }} - uses: ros-tooling/action-ros2-lint@0.1.3 @@ -119,4 +119,4 @@ jobs: no-cache: true tags: | luxonis/depthai-ros:${{ steps.vars.outputs.short_ref }} - luxonis/depthai-ros:${{ env.ROS_DISTRO }}-latest \ No newline at end of file + luxonis/depthai-ros:${{ env.ROS_DISTRO }}-latest diff --git a/README.md b/README.md index de9b00c1..a18a9631 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ # Depthai ROS Repository Hi and welcome to the main depthai-ros repository! Here you can find ROS related code for OAK cameras from Luxonis. Don't have one? You can get them [here!](https://shop.luxonis.com/) -You can find the newest documentation [here](https://docs-beta.luxonis.com/software/ros/depthai-ros/intro/). +You can find the newest documentation [here](https://docs.luxonis.com/software/ros/depthai-ros/) diff --git a/depthai-ros/CHANGELOG.rst b/depthai-ros/CHANGELOG.rst index f97c2b58..1917e593 100644 --- a/depthai-ros/CHANGELOG.rst +++ b/depthai-ros/CHANGELOG.rst @@ -1,6 +1,33 @@ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Changelog for package depthai-ros ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Changelog for package depthai-ros +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +2.10.0 (2024-08-29) +------------------- +## What's Changed +* Adding stl files for SR and LR models by @danilo-pejovic in https://github.com/luxonis/depthai-ros/pull/491 +* No imu fix Humble by @Serafadam in https://github.com/luxonis/depthai-ros/pull/500 +* Tracking converter for ROS2 Humble by @daniqsilva25 in https://github.com/luxonis/depthai-ros/pull/505 +* Added Env Hooks so that depthai xacro can be used with gazebo sim by @r4hul77 in https://github.com/luxonis/depthai-ros/pull/507 +* Fix resource paths for Ignition Gazebo by @Nibanovic in https://github.com/luxonis/depthai-ros/pull/511 +* Use simulation flag to decide how to load meshes. by @destogl in https://github.com/luxonis/depthai-ros/pull/524 +* Add new launch file for starting multiple rgbd cameras on robots.
by @destogl in https://github.com/luxonis/depthai-ros/pull/532 +* Missing fields in detection messages Humble by @Serafadam in https://github.com/luxonis/depthai-ros/pull/574 +* Ip autodiscovery fix Humble by @Serafadam in https://github.com/luxonis/depthai-ros/pull/561 +* RS Mode & Sync - Humble by @Serafadam in https://github.com/luxonis/depthai-ros/pull/578 +* Compressed image publishers by @Serafadam in https://github.com/luxonis/depthai-ros/pull/580 +* ToF Support Humble by @Serafadam in https://github.com/luxonis/depthai-ros/pull/581 +* WLS fix humble by @Serafadam in https://github.com/luxonis/depthai-ros/pull/582 +* Syncing & RS updates Humble by @Serafadam in https://github.com/luxonis/depthai-ros/pull/586 + +## New Contributors +* @r4hul77 made their first contribution in https://github.com/luxonis/depthai-ros/pull/507 +* @Nibanovic made their first contribution in https://github.com/luxonis/depthai-ros/pull/511 +* @destogl made their first contribution in https://github.com/luxonis/depthai-ros/pull/524 + +**Full Changelog**: https://github.com/luxonis/depthai-ros/compare/v2.9.0-humble...v2.10.0-humble 2.9.0 (2024-01-24) ------------------- diff --git a/depthai-ros/CMakeLists.txt b/depthai-ros/CMakeLists.txt index 794351c9..770691b9 100644 --- a/depthai-ros/CMakeLists.txt +++ b/depthai-ros/CMakeLists.txt @@ -1,10 +1,10 @@ cmake_minimum_required(VERSION 3.10.2) # CMake version in Ubuntu 18.04 LTS -project(depthai-ros VERSION 2.9.0 LANGUAGES CXX C) +project(depthai-ros VERSION 2.10.0 LANGUAGES CXX C) set(CMAKE_CXX_STANDARD 14) find_package(ament_cmake REQUIRED) -ament_package() \ No newline at end of file +ament_package() diff --git a/depthai-ros/package.xml b/depthai-ros/package.xml index 6d848954..a9a262b3 100644 --- a/depthai-ros/package.xml +++ b/depthai-ros/package.xml @@ -1,13 +1,13 @@ depthai-ros - 2.9.0 + 2.10.0 The depthai-ros package - sachin + Adam Serafin Sachin Guruswamy MIT diff --git a/depthai_bridge/CMakeLists.txt b/depthai_bridge/CMakeLists.txt index 5bb5a8c2..67ef0ef7 100644 --- a/depthai_bridge/CMakeLists.txt +++ b/depthai_bridge/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required(VERSION 3.10.2) # CMake version in Ubuntu 18.04 LTS set(CMAKE_POSITION_INDEPENDENT_CODE ON) -project(depthai_bridge VERSION 2.9.0 LANGUAGES CXX C) +project(depthai_bridge VERSION 2.10.0 LANGUAGES CXX C) set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) @@ -41,6 +41,7 @@ find_package(tf2_ros REQUIRED) find_package(tf2 REQUIRED) find_package(tf2_geometry_msgs REQUIRED) find_package(composition_interfaces REQUIRED) +find_package(ffmpeg_image_transport_msgs REQUIRED) set(dependencies camera_info_manager @@ -56,6 +57,7 @@ set(dependencies tf2_geometry_msgs tf2 composition_interfaces + ffmpeg_image_transport_msgs ) include_directories( @@ -72,6 +74,8 @@ file(GLOB LIB_SRC "src/ImuConverter.cpp" "src/TFPublisher.cpp" "src/TrackedFeaturesConverter.cpp" +"src/TrackDetectionConverter.cpp" +"src/TrackSpatialDetectionConverter.cpp" ) add_library(${PROJECT_NAME} SHARED ${LIB_SRC}) diff --git a/depthai_bridge/include/depthai_bridge/ImageConverter.hpp b/depthai_bridge/include/depthai_bridge/ImageConverter.hpp index 8ad51e94..a9d6be82 100644 --- a/depthai_bridge/include/depthai_bridge/ImageConverter.hpp +++ b/depthai_bridge/include/depthai_bridge/ImageConverter.hpp @@ -10,9 +10,12 @@ #include "depthai-shared/common/CameraBoardSocket.hpp" #include "depthai-shared/common/Point2f.hpp" #include "depthai/device/CalibrationHandler.hpp" +#include "depthai/pipeline/datatype/EncodedFrame.hpp" 
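+// EncodedFrame carries the bitstream produced by the device's VideoEncoder node;
+// together with the FFMPEGPacket message included below, it backs the new
+// toRosFFMPEGPacket()/toRosCompressedMsg() converters declared later in this header.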
#include "depthai/pipeline/datatype/ImgFrame.hpp" +#include "ffmpeg_image_transport_msgs/msg/ffmpeg_packet.hpp" #include "rclcpp/time.hpp" #include "sensor_msgs/msg/camera_info.hpp" +#include "sensor_msgs/msg/compressed_image.hpp" #include "sensor_msgs/msg/image.hpp" #include "std_msgs/msg/header.hpp" @@ -22,7 +25,10 @@ namespace ros { namespace StdMsgs = std_msgs::msg; namespace ImageMsgs = sensor_msgs::msg; +namespace FFMPEGMsgs = ffmpeg_image_transport_msgs::msg; using ImagePtr = ImageMsgs::Image::SharedPtr; +using FFMPEGImagePtr = FFMPEGMsgs::FFMPEGPacket::SharedPtr; +using CompImagePtr = ImageMsgs::CompressedImage::SharedPtr; using TimePoint = std::chrono::time_point; @@ -47,7 +53,7 @@ class ImageConverter { * @param update: bool whether to automatically update the ROS base time on message conversion */ void setUpdateRosBaseTimeOnToRosMsg(bool update = true) { - _updateRosBaseTimeOnToRosMsg = update; + updateRosBaseTimeOnToRosMsg = update; } /** @@ -80,10 +86,20 @@ class ImageConverter { */ void setAlphaScaling(double alphaScalingFactor = 0.0); + /** + * @brief Sets the encoding of the image when converting to FFMPEG message. Default is libx264. + * @param encoding: The encoding to be used. + */ + void setFFMPEGEncoding(const std::string& encoding); + void toRosMsg(std::shared_ptr inData, std::deque& outImageMsgs); ImageMsgs::Image toRosMsgRawPtr(std::shared_ptr inData, const sensor_msgs::msg::CameraInfo& info = sensor_msgs::msg::CameraInfo()); ImagePtr toRosMsgPtr(std::shared_ptr inData); + FFMPEGMsgs::FFMPEGPacket toRosFFMPEGPacket(std::shared_ptr inData); + + ImageMsgs::CompressedImage toRosCompressedMsg(std::shared_ptr inData); + void toDaiMsg(const ImageMsgs::Image& inMsg, dai::ImgFrame& outData); /** TODO(sachin): Add support for ros msg to cv mat since we have some @@ -99,36 +115,37 @@ class ImageConverter { Point2f bottomRightPixelId = Point2f()); private: + void planarToInterleaved(const std::vector& srcData, std::vector& destData, int w, int h, int numPlanes, int bpp); + void interleavedToPlanar(const std::vector& srcData, std::vector& destData, int w, int h, int numPlanes, int bpp); static std::unordered_map encodingEnumMap; static std::unordered_map planarEncodingEnumMap; // dai::RawImgFrame::Type _srcType; - bool _daiInterleaved; + bool daiInterleaved; // bool c - const std::string _frameName = ""; - void planarToInterleaved(const std::vector& srcData, std::vector& destData, int w, int h, int numPlanes, int bpp); - void interleavedToPlanar(const std::vector& srcData, std::vector& destData, int w, int h, int numPlanes, int bpp); - std::chrono::time_point _steadyBaseTime; + const std::string frameName = ""; + std::chrono::time_point steadyBaseTime; - rclcpp::Time _rosBaseTime; - bool _getBaseDeviceTimestamp; + rclcpp::Time rosBaseTime; + bool getBaseDeviceTimestamp; // For handling ROS time shifts and debugging - int64_t _totalNsChange{0}; + int64_t totalNsChange{0}; // Whether to update the ROS base time on each message conversion - bool _updateRosBaseTimeOnToRosMsg{false}; - dai::RawImgFrame::Type _srcType; - bool _fromBitstream = false; - bool _convertDispToDepth = false; - bool _addExpOffset = false; - bool _alphaScalingEnabled = false; - dai::CameraExposureOffset _expOffset; - bool _reverseStereoSocketOrder = false; - double _baseline; - double _alphaScalingFactor = 0.0; + bool updateRosBaseTimeOnToRosMsg{false}; + dai::RawImgFrame::Type srcType; + bool fromBitstream = false; + bool dispToDepth = false; + bool addExpOffset = false; + bool alphaScalingEnabled = false; + 
dai::CameraExposureOffset expOffset; + bool reversedStereoSocketOrder = false; + double baseline; + double alphaScalingFactor = 0.0; + int camHeight = -1; + int camWidth = -1; + std::string ffmpegEncoding = "libx264"; }; } // namespace ros -namespace rosBridge = ros; - } // namespace dai diff --git a/depthai_bridge/include/depthai_bridge/TFPublisher.hpp b/depthai_bridge/include/depthai_bridge/TFPublisher.hpp index 0e384ada..daee7968 100644 --- a/depthai_bridge/include/depthai_bridge/TFPublisher.hpp +++ b/depthai_bridge/include/depthai_bridge/TFPublisher.hpp @@ -4,15 +4,19 @@ #include "geometry_msgs/msg/quaternion.hpp" #include "geometry_msgs/msg/transform_stamped.hpp" #include "nlohmann/json.hpp" -#include "rclcpp/node.hpp" +#include "rclcpp/logger.hpp" #include "rclcpp/parameter_client.hpp" #include "tf2_ros/static_transform_broadcaster.h" +namespace rclcpp { +class Node; +} // namespace rclcpp + namespace dai { namespace ros { class TFPublisher { public: - explicit TFPublisher(rclcpp::Node* node, + explicit TFPublisher(std::shared_ptr node, const dai::CalibrationHandler& calHandler, const std::vector& camFeatures, const std::string& camName, @@ -27,7 +31,8 @@ class TFPublisher { const std::string& camYaw, const std::string& imuFromDescr, const std::string& customURDFLocation, - const std::string& customXacroArgs); + const std::string& customXacroArgs, + const bool rsCompatibilityMode); /** * @brief Obtain URDF description by running Xacro with provided arguments. */ @@ -53,36 +58,37 @@ class TFPublisher { * Frame names are based on socket names and use following convention: [base_frame]_[socket_name]_camera_frame and * [base_frame]_[socket_name]_camera_optical_frame */ - void publishCamTransforms(nlohmann::json camData, rclcpp::Node* node); + void publishCamTransforms(nlohmann::json camData, std::shared_ptr node); /** * @brief Publish IMU transform based on calibration data. * Frame name is based on IMU name and uses following convention: [base_frame]_imu_frame. * If IMU extrinsics are not set, warning is printed out and imu frame is published with zero translation and rotation. */ - void publishImuTransform(nlohmann::json json, rclcpp::Node* node); + void publishImuTransform(nlohmann::json json, std::shared_ptr node); /** * @brief Check if model STL file is available in depthai_descriptions package. 
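 * The lookup scans the depthai_descriptions model directory with readdir() and compares each entry against camModel + ".stl" (see the TFPublisher.cpp implementation further down in this diff).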
*/ bool modelNameAvailable(); std::string getCamSocketName(int socketNum); - std::unique_ptr _paramClient; - std::shared_ptr _tfPub; - std::string _camName; - std::string _camModel; - std::string _baseFrame; - std::string _parentFrame; - std::string _camPosX; - std::string _camPosY; - std::string _camPosZ; - std::string _camRoll; - std::string _camPitch; - std::string _camYaw; - std::string _imuFromDescr; - std::string _customURDFLocation; - std::string _customXacroArgs; - std::vector _camFeatures; - rclcpp::Logger _logger; - const std::unordered_map _socketNameMap = { + std::unique_ptr paramClient; + std::shared_ptr tfPub; + std::string camName; + std::string camModel; + std::string baseFrame; + std::string parentFrame; + std::string camPosX; + std::string camPosY; + std::string camPosZ; + std::string camRoll; + std::string camPitch; + std::string camYaw; + std::string imuFromDescr; + std::string customURDFLocation; + std::string customXacroArgs; + std::vector camFeatures; + bool rsCompatibilityMode; + rclcpp::Logger logger; + const std::unordered_map socketNameMap = { {dai::CameraBoardSocket::AUTO, "rgb"}, {dai::CameraBoardSocket::CAM_A, "rgb"}, {dai::CameraBoardSocket::CAM_B, "left"}, @@ -90,6 +96,12 @@ class TFPublisher { {dai::CameraBoardSocket::CAM_D, "left_back"}, {dai::CameraBoardSocket::CAM_E, "right_back"}, }; + const std::unordered_map rsSocketNameMap = { + {dai::CameraBoardSocket::AUTO, "color"}, + {dai::CameraBoardSocket::CAM_A, "color"}, + {dai::CameraBoardSocket::CAM_B, "infra2"}, + {dai::CameraBoardSocket::CAM_C, "infra1"}, + }; }; } // namespace ros -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/depthai_bridge/include/depthai_bridge/TrackDetectionConverter.hpp b/depthai_bridge/include/depthai_bridge/TrackDetectionConverter.hpp new file mode 100644 index 00000000..27c5e74e --- /dev/null +++ b/depthai_bridge/include/depthai_bridge/TrackDetectionConverter.hpp @@ -0,0 +1,60 @@ +#pragma once + +#include +#include +#include + +#include "depthai/pipeline/datatype/Tracklets.hpp" +#include "depthai_ros_msgs/msg/track_detection2_d_array.hpp" +#include "rclcpp/time.hpp" +#include "vision_msgs/msg/detection2_d_array.hpp" + +namespace dai { + +namespace ros { + +class TrackDetectionConverter { + public: + TrackDetectionConverter(std::string frameName, int width, int height, bool normalized = false, float thresh = 0.0, bool getBaseDeviceTimestamp = false); + ~TrackDetectionConverter(); + + /** + * @brief Handles cases in which the ROS time shifts forward or backward + * Should be called at regular intervals or on-change of ROS time, depending + * on monitoring. 
+ * + */ + void updateRosBaseTime(); + + /** + * @brief Commands the converter to automatically update the ROS base time on message conversion based on variable + * + * @param update: bool whether to automatically update the ROS base time on message conversion + */ + void setUpdateRosBaseTimeOnToRosMsg(bool update = true) { + _updateRosBaseTimeOnToRosMsg = update; + } + + void toRosMsg(std::shared_ptr trackData, std::deque& opDetectionMsgs); + + depthai_ros_msgs::msg::TrackDetection2DArray::SharedPtr toRosMsgPtr(std::shared_ptr trackData); + + private: + int _width, _height; + const std::string _frameName; + bool _normalized; + float _thresh; + std::chrono::time_point _steadyBaseTime; + rclcpp::Time _rosBaseTime; + bool _getBaseDeviceTimestamp; + // For handling ROS time shifts and debugging + int64_t _totalNsChange{0}; + // Whether to update the ROS base time on each message conversion + bool _updateRosBaseTimeOnToRosMsg{false}; +}; + +} // namespace ros + +namespace rosBridge = ros; + +} // namespace dai diff --git a/depthai_bridge/include/depthai_bridge/TrackSpatialDetectionConverter.hpp b/depthai_bridge/include/depthai_bridge/TrackSpatialDetectionConverter.hpp new file mode 100644 index 00000000..d2889256 --- /dev/null +++ b/depthai_bridge/include/depthai_bridge/TrackSpatialDetectionConverter.hpp @@ -0,0 +1,61 @@ +#pragma once + +#include +#include +#include + +#include "depthai/pipeline/datatype/Tracklets.hpp" +#include "depthai_ros_msgs/msg/track_detection2_d_array.hpp" +#include "rclcpp/time.hpp" +#include "vision_msgs/msg/detection2_d_array.hpp" + +namespace dai { + +namespace ros { + +class TrackSpatialDetectionConverter { + public: + TrackSpatialDetectionConverter( + std::string frameName, int width, int height, bool normalized = false, float thresh = 0.0, bool getBaseDeviceTimestamp = false); + ~TrackSpatialDetectionConverter(); + + /** + * @brief Handles cases in which the ROS time shifts forward or backward + * Should be called at regular intervals or on-change of ROS time, depending + * on monitoring. 
+ * + */ + void updateRosBaseTime(); + + /** + * @brief Commands the converter to automatically update the ROS base time on message conversion based on variable + * + * @param update: bool whether to automatically update the ROS base time on message conversion + */ + void setUpdateRosBaseTimeOnToRosMsg(bool update = true) { + _updateRosBaseTimeOnToRosMsg = update; + } + + void toRosMsg(std::shared_ptr trackData, std::deque& opDetectionMsgs); + + depthai_ros_msgs::msg::TrackDetection2DArray::SharedPtr toRosMsgPtr(std::shared_ptr trackData); + + private: + int _width, _height; + const std::string _frameName; + bool _normalized; + float _thresh; + std::chrono::time_point _steadyBaseTime; + rclcpp::Time _rosBaseTime; + bool _getBaseDeviceTimestamp; + // For handling ROS time shifts and debugging + int64_t _totalNsChange{0}; + // Whether to update the ROS base time on each message conversion + bool _updateRosBaseTimeOnToRosMsg{false}; +}; + +} // namespace ros + +namespace rosBridge = ros; + +} // namespace dai \ No newline at end of file diff --git a/depthai_bridge/package.xml b/depthai_bridge/package.xml index f83bca45..ef1bc7ba 100644 --- a/depthai_bridge/package.xml +++ b/depthai_bridge/package.xml @@ -1,10 +1,10 @@ depthai_bridge - 2.9.0 + 2.10.0 The depthai_bridge package - Sachin Guruswamy + Adam Serafin Sachin Guruswamy MIT @@ -25,6 +25,7 @@ std_msgs stereo_msgs vision_msgs + ffmpeg_image_transport_msgs tf2_ros tf2 tf2_geometry_msgs diff --git a/depthai_bridge/src/ImageConverter.cpp b/depthai_bridge/src/ImageConverter.cpp index c678b1ee..7ccaac4d 100644 --- a/depthai_bridge/src/ImageConverter.cpp +++ b/depthai_bridge/src/ImageConverter.cpp @@ -1,6 +1,10 @@ #include "depthai_bridge/ImageConverter.hpp" +#include + +#include "depthai-shared/datatype/RawEncodedFrame.hpp" +#include "depthai/pipeline/datatype/EncodedFrame.hpp" #include "depthai_bridge/depthaiUtility.hpp" #include "opencv2/calib3d.hpp" #include "opencv2/imgcodecs.hpp" @@ -26,71 +30,124 @@ std::unordered_map ImageConverter::planarEn {dai::RawImgFrame::Type::YUV420p, "rgb8"}}; ImageConverter::ImageConverter(bool interleaved, bool getBaseDeviceTimestamp) - : _daiInterleaved(interleaved), _steadyBaseTime(std::chrono::steady_clock::now()), _getBaseDeviceTimestamp(getBaseDeviceTimestamp) { - _rosBaseTime = rclcpp::Clock().now(); + : daiInterleaved(interleaved), steadyBaseTime(std::chrono::steady_clock::now()), getBaseDeviceTimestamp(getBaseDeviceTimestamp) { + rosBaseTime = rclcpp::Clock().now(); } ImageConverter::ImageConverter(const std::string frameName, bool interleaved, bool getBaseDeviceTimestamp) - : _frameName(frameName), _daiInterleaved(interleaved), _steadyBaseTime(std::chrono::steady_clock::now()), _getBaseDeviceTimestamp(getBaseDeviceTimestamp) { - _rosBaseTime = rclcpp::Clock().now(); + : frameName(frameName), daiInterleaved(interleaved), steadyBaseTime(std::chrono::steady_clock::now()), getBaseDeviceTimestamp(getBaseDeviceTimestamp) { + rosBaseTime = rclcpp::Clock().now(); } ImageConverter::~ImageConverter() = default; void ImageConverter::updateRosBaseTime() { - updateBaseTime(_steadyBaseTime, _rosBaseTime, _totalNsChange); + updateBaseTime(steadyBaseTime, rosBaseTime, totalNsChange); } void ImageConverter::convertFromBitstream(dai::RawImgFrame::Type srcType) { - _fromBitstream = true; - _srcType = srcType; + fromBitstream = true; + this->srcType = srcType; } void ImageConverter::convertDispToDepth(double baseline) { - _convertDispToDepth = true; - _baseline = baseline; + dispToDepth = true; + this->baseline = 
baseline; } void ImageConverter::addExposureOffset(dai::CameraExposureOffset& offset) { - _expOffset = offset; - _addExpOffset = true; + expOffset = offset; + addExpOffset = true; } void ImageConverter::reverseStereoSocketOrder() { - _reverseStereoSocketOrder = true; + reversedStereoSocketOrder = true; } void ImageConverter::setAlphaScaling(double alphaScalingFactor) { - _alphaScalingEnabled = true; - _alphaScalingFactor = alphaScalingFactor; + alphaScalingEnabled = true; + this->alphaScalingFactor = alphaScalingFactor; +} + +void ImageConverter::setFFMPEGEncoding(const std::string& encoding) { + ffmpegEncoding = encoding; } ImageMsgs::Image ImageConverter::toRosMsgRawPtr(std::shared_ptr inData, const sensor_msgs::msg::CameraInfo& info) { - if(_updateRosBaseTimeOnToRosMsg) { + if(updateRosBaseTimeOnToRosMsg) { updateRosBaseTime(); } std::chrono::_V2::steady_clock::time_point tstamp; - if(_getBaseDeviceTimestamp) - if(_addExpOffset) - tstamp = inData->getTimestampDevice(_expOffset); + if(getBaseDeviceTimestamp) + if(addExpOffset) + tstamp = inData->getTimestampDevice(expOffset); else tstamp = inData->getTimestampDevice(); - else if(_addExpOffset) - tstamp = inData->getTimestamp(_expOffset); + else if(addExpOffset) + tstamp = inData->getTimestamp(expOffset); else tstamp = inData->getTimestamp(); ImageMsgs::Image outImageMsg; StdMsgs::Header header; - header.frame_id = _frameName; + header.frame_id = frameName; - header.stamp = getFrameTime(_rosBaseTime, _steadyBaseTime, tstamp); + header.stamp = getFrameTime(rosBaseTime, steadyBaseTime, tstamp); + + if(fromBitstream) { + std::string encoding; + int decodeFlags; + int channels; + cv::Mat output; + switch(srcType) { + case dai::RawImgFrame::Type::BGR888i: { + encoding = sensor_msgs::image_encodings::BGR8; + decodeFlags = cv::IMREAD_COLOR; + channels = CV_8UC3; + break; + } + case dai::RawImgFrame::Type::GRAY8: { + encoding = sensor_msgs::image_encodings::MONO8; + decodeFlags = cv::IMREAD_GRAYSCALE; + channels = CV_8UC1; + break; + } + case dai::RawImgFrame::Type::RAW8: { + encoding = sensor_msgs::image_encodings::TYPE_16UC1; + decodeFlags = cv::IMREAD_ANYDEPTH; + channels = CV_16UC1; + break; + } + default: { + std::cout << frameName << static_cast(srcType) << std::endl; + throw(std::runtime_error("Converted type not supported!")); + } + } + + output = cv::imdecode(cv::Mat(inData->getData()), decodeFlags); + + // converting disparity + if(dispToDepth) { + auto factor = std::abs(baseline * 10) * info.p[0]; + cv::Mat depthOut = cv::Mat(cv::Size(output.cols, output.rows), CV_16UC1); + depthOut.forEach([&output, &factor](uint16_t& pixel, const int* position) -> void { + auto disp = output.at(position); + if(disp == 0) + pixel = 0; + else + pixel = factor / disp; + }); + output = depthOut.clone(); + } + cv_bridge::CvImage(header, encoding, output).toImageMsg(outImageMsg); + return outImageMsg; + } - if(_fromBitstream) { + if(fromBitstream) { std::string encoding; int decodeFlags; int channels; cv::Mat output; - switch(_srcType) { + switch(srcType) { case dai::RawImgFrame::Type::BGR888i: { encoding = sensor_msgs::image_encodings::BGR8; decodeFlags = cv::IMREAD_COLOR; @@ -110,7 +167,7 @@ ImageMsgs::Image ImageConverter::toRosMsgRawPtr(std::shared_ptr i break; } default: { - std::cout << _frameName << static_cast(_srcType) << std::endl; + std::cout << frameName << static_cast(srcType) << std::endl; throw(std::runtime_error("Converted type not supported!")); } } @@ -118,8 +175,8 @@ ImageMsgs::Image ImageConverter::toRosMsgRawPtr(std::shared_ptr i 
output = cv::imdecode(cv::Mat(inData->getData()), decodeFlags); // converting disparity - if(_convertDispToDepth) { - auto factor = std::abs(_baseline * 10) * info.p[0]; + if(dispToDepth) { + auto factor = std::abs(baseline * 10) * info.p[0]; cv::Mat depthOut = cv::Mat(cv::Size(output.cols, output.rows), CV_16UC1); depthOut.forEach([&output, &factor](uint16_t& pixel, const int* position) -> void { auto disp = output.at(position); @@ -210,6 +267,80 @@ ImageMsgs::Image ImageConverter::toRosMsgRawPtr(std::shared_ptr i return outImageMsg; } +std::chrono::time_point getOffsetTimestamp( + std::chrono::time_point ts, + CameraExposureOffset offset, + std::chrono::microseconds expTime) { + switch(offset) { + case CameraExposureOffset::START: + return ts - expTime; + case CameraExposureOffset::MIDDLE: + return ts - expTime / 2; + case CameraExposureOffset::END: + default: + return ts; + } +} + +ImageMsgs::CompressedImage ImageConverter::toRosCompressedMsg(std::shared_ptr inData) { + if(updateRosBaseTimeOnToRosMsg) { + updateRosBaseTime(); + } + std::chrono::_V2::steady_clock::time_point tstamp; + if(getBaseDeviceTimestamp) + if(addExpOffset) + tstamp = getOffsetTimestamp(inData->getTimestampDevice(), expOffset, inData->getExposureTime()); + else + tstamp = inData->getTimestampDevice(); + else if(addExpOffset) + tstamp = getOffsetTimestamp(inData->getTimestamp(), expOffset, inData->getExposureTime()); + else + tstamp = inData->getTimestamp(); + + ImageMsgs::CompressedImage outImageMsg; + StdMsgs::Header header; + header.frame_id = frameName; + header.stamp = getFrameTime(rosBaseTime, steadyBaseTime, tstamp); + + outImageMsg.header = header; + outImageMsg.format = "jpeg"; + outImageMsg.data = inData->getData(); + return outImageMsg; +} + +FFMPEGMsgs::FFMPEGPacket ImageConverter::toRosFFMPEGPacket(std::shared_ptr inData) { + if(updateRosBaseTimeOnToRosMsg) { + updateRosBaseTime(); + } + std::chrono::_V2::steady_clock::time_point tstamp; + if(getBaseDeviceTimestamp) + if(addExpOffset) + tstamp = getOffsetTimestamp(inData->getTimestampDevice(), expOffset, inData->getExposureTime()); + else + tstamp = inData->getTimestampDevice(); + else if(addExpOffset) + tstamp = getOffsetTimestamp(inData->getTimestamp(), expOffset, inData->getExposureTime()); + else + tstamp = inData->getTimestamp(); + + FFMPEGMsgs::FFMPEGPacket outFrameMsg; + StdMsgs::Header header; + header.frame_id = frameName; + header.stamp = getFrameTime(rosBaseTime, steadyBaseTime, tstamp); + outFrameMsg.header = header; + auto ft = inData->getFrameType(); + + outFrameMsg.width = camWidth; + outFrameMsg.height = camHeight; + outFrameMsg.encoding = ffmpegEncoding; + outFrameMsg.pts = header.stamp.sec * 1000000000 + header.stamp.nanosec; // in nanoseconds + outFrameMsg.flags = (int)(ft == RawEncodedFrame::FrameType::I); + outFrameMsg.is_bigendian = false; + outFrameMsg.data = inData->getData(); + + return outFrameMsg; +} + void ImageConverter::toRosMsg(std::shared_ptr inData, std::deque& outImageMsgs) { auto outImageMsg = toRosMsgRawPtr(inData); outImageMsgs.push_back(outImageMsg); @@ -225,7 +356,7 @@ ImagePtr ImageConverter::toRosMsgPtr(std::shared_ptr inData) { void ImageConverter::toDaiMsg(const ImageMsgs::Image& inMsg, dai::ImgFrame& outData) { std::unordered_map::iterator revEncodingIter; - if(_daiInterleaved) { + if(daiInterleaved) { revEncodingIter = std::find_if(encodingEnumMap.begin(), encodingEnumMap.end(), [&](const std::pair& pair) { return pair.second == inMsg.encoding; }); @@ -352,6 +483,8 @@ ImageMsgs::CameraInfo 
ImageConverter::calibrationToCameraInfo( cameraData.height = static_cast(height); } + camWidth = cameraData.width; + camHeight = cameraData.height; camIntrinsics = calibHandler.getCameraIntrinsics(cameraId, cameraData.width, cameraData.height, topLeftPixelId, bottomRightPixelId); flatIntrinsics.resize(9); @@ -382,7 +515,7 @@ ImageMsgs::CameraInfo ImageConverter::calibrationToCameraInfo( std::vector> stereoIntrinsics = calibHandler.getCameraIntrinsics( calibHandler.getStereoRightCameraId(), cameraData.width, cameraData.height, topLeftPixelId, bottomRightPixelId); - if(_alphaScalingEnabled) { + if(alphaScalingEnabled) { cv::Mat cameraMatrix = cv::Mat(3, 3, CV_64F); for(int i = 0; i < 3; i++) { for(int j = 0; j < 3; j++) { @@ -391,7 +524,7 @@ ImageMsgs::CameraInfo ImageConverter::calibrationToCameraInfo( } cv::Mat distCoefficients(distCoeffs); - cv::Mat newCameraMatrix = cv::getOptimalNewCameraMatrix(cameraMatrix, distCoefficients, cv::Size(width, height), _alphaScalingFactor); + cv::Mat newCameraMatrix = cv::getOptimalNewCameraMatrix(cameraMatrix, distCoefficients, cv::Size(width, height), alphaScalingFactor); // Copying the contents of newCameraMatrix to stereoIntrinsics for(int i = 0; i < 3; i++) { for(int j = 0; j < 3; j++) { @@ -410,7 +543,7 @@ ImageMsgs::CameraInfo ImageConverter::calibrationToCameraInfo( dai::CameraBoardSocket stereoSocketFirst = calibHandler.getStereoLeftCameraId(); dai::CameraBoardSocket stereoSocketSecond = calibHandler.getStereoRightCameraId(); double factor = 1.0; - if(_reverseStereoSocketOrder) { + if(reversedStereoSocketOrder) { stereoSocketFirst = calibHandler.getStereoRightCameraId(); stereoSocketSecond = calibHandler.getStereoLeftCameraId(); factor = -1.0; @@ -439,4 +572,4 @@ ImageMsgs::CameraInfo ImageConverter::calibrationToCameraInfo( return cameraData; } } // namespace ros -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/depthai_bridge/src/ImgDetectionConverter.cpp b/depthai_bridge/src/ImgDetectionConverter.cpp index 0b18d41f..cbd0c9d2 100644 --- a/depthai_bridge/src/ImgDetectionConverter.cpp +++ b/depthai_bridge/src/ImgDetectionConverter.cpp @@ -86,4 +86,4 @@ Detection2DArrayPtr ImgDetectionConverter::toRosMsgPtr(std::shared_ptr inData, std::dequepackets.size(); ++i) { auto accel = inData->packets[i].acceleroMeter; auto gyro = inData->packets[i].gyroscope; + ImuMsgs::Imu msg; std::chrono::_V2::steady_clock::time_point tstamp; if(_getBaseDeviceTimestamp) @@ -142,4 +143,4 @@ void ImuConverter::toRosDaiMsg(std::shared_ptr inData, std::deque< } } // namespace ros -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/depthai_bridge/src/SpatialDetectionConverter.cpp b/depthai_bridge/src/SpatialDetectionConverter.cpp index a4f62840..1b916ad1 100644 --- a/depthai_bridge/src/SpatialDetectionConverter.cpp +++ b/depthai_bridge/src/SpatialDetectionConverter.cpp @@ -60,10 +60,10 @@ void SpatialDetectionConverter::toRosMsg(std::shared_ptrdetections[i].label); + opDetectionMsg.detections[i].results[0].score = inNetData->detections[i].confidence; opDetectionMsg.detections[i].bbox.center.position.x = xCenter; opDetectionMsg.detections[i].bbox.center.position.y = yCenter; - opDetectionMsg.detections[i].bbox.size_x = xSize; opDetectionMsg.detections[i].bbox.size_y = ySize; @@ -140,4 +140,4 @@ void SpatialDetectionConverter::toRosVisionMsg(std::shared_ptr node, const dai::CalibrationHandler& calHandler, const std::vector& camFeatures, const std::string& camName, @@ -35,31 +35,33 @@ 
TFPublisher::TFPublisher(rclcpp::Node* node, const std::string& camYaw, const std::string& imuFromDescr, const std::string& customURDFLocation, - const std::string& customXacroArgs) - : _camName(camName), - _camModel(camModel), - _baseFrame(baseFrame), - _parentFrame(parentFrame), - _camPosX(camPosX), - _camPosY(camPosY), - _camPosZ(camPosZ), - _camRoll(camRoll), - _camPitch(camPitch), - _camYaw(camYaw), - _camFeatures(camFeatures), - _imuFromDescr(imuFromDescr), - _customURDFLocation(customURDFLocation), - _customXacroArgs(customXacroArgs), - _logger(node->get_logger()) { - _tfPub = std::make_shared(node); - - _paramClient = std::make_unique(node, _camName + std::string("_state_publisher")); + const std::string& customXacroArgs, + const bool rsCompatibilityMode) + : camName(camName), + camModel(camModel), + baseFrame(baseFrame), + parentFrame(parentFrame), + camPosX(camPosX), + camPosY(camPosY), + camPosZ(camPosZ), + camRoll(camRoll), + camPitch(camPitch), + camYaw(camYaw), + camFeatures(camFeatures), + imuFromDescr(imuFromDescr), + customURDFLocation(customURDFLocation), + customXacroArgs(customXacroArgs), + rsCompatibilityMode(rsCompatibilityMode), + logger(node->get_logger()) { + tfPub = std::make_shared(node); + + paramClient = std::make_unique(node, camName + std::string("_state_publisher")); auto json = calHandler.eepromToJson(); auto camData = json["cameraData"]; publishDescription(); publishCamTransforms(camData, node); - if(_imuFromDescr != "true") { + if(imuFromDescr != "true") { publishImuTransform(json, node); } } @@ -67,11 +69,11 @@ TFPublisher::TFPublisher(rclcpp::Node* node, void TFPublisher::publishDescription() { auto urdf = getURDF(); auto robotDescr = rclcpp::Parameter("robot_description", urdf); - auto result = _paramClient->set_parameters({robotDescr}); - RCLCPP_INFO(_logger, "Published URDF"); + auto result = paramClient->set_parameters({robotDescr}); + RCLCPP_INFO(logger, "Published URDF"); } -void TFPublisher::publishCamTransforms(nlohmann::json camData, rclcpp::Node* node) { +void TFPublisher::publishCamTransforms(nlohmann::json camData, std::shared_ptr node) { for(auto& cam : camData) { geometry_msgs::msg::TransformStamped ts; geometry_msgs::msg::TransformStamped opticalTS; @@ -83,51 +85,54 @@ void TFPublisher::publishCamTransforms(nlohmann::json camData, rclcpp::Node* nod ts.transform.translation = transFromExtr(extrinsics["translation"]); std::string name = getCamSocketName(cam[0]); - ts.child_frame_id = _baseFrame + std::string("_") + name + std::string("_camera_frame"); + ts.child_frame_id = baseFrame + std::string("_") + name + std::string("_camera_frame"); // check if the camera is at the end of the chain if(extrinsics["toCameraSocket"] != -1) { - ts.header.frame_id = _baseFrame + std::string("_") + getCamSocketName(extrinsics["toCameraSocket"].get()) + std::string("_camera_frame"); + ts.header.frame_id = baseFrame + std::string("_") + getCamSocketName(extrinsics["toCameraSocket"].get()) + std::string("_camera_frame"); } else { - ts.header.frame_id = _baseFrame; + ts.header.frame_id = baseFrame; ts.transform.rotation.w = 1.0; ts.transform.rotation.x = 0.0; ts.transform.rotation.y = 0.0; ts.transform.rotation.z = 0.0; } // rotate optical transform - opticalTS.child_frame_id = _baseFrame + std::string("_") + name + std::string("_camera_optical_frame"); + opticalTS.child_frame_id = baseFrame + std::string("_") + name + std::string("_camera_optical_frame"); opticalTS.header.frame_id = ts.child_frame_id; opticalTS.transform.rotation.w = 0.5;
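// The constant quaternion assigned here, (w, x, y, z) = (0.5, -0.5, 0.5, -0.5), is the
// standard ROS camera-to-optical rotation, i.e. RPY (-90 deg, 0, -90 deg): it maps the
// camera frame (x forward, y left, z up) onto the optical frame (z forward, x right,
// y down) defined by REP 103.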
opticalTS.transform.rotation.x = -0.5; opticalTS.transform.rotation.y = 0.5; opticalTS.transform.rotation.z = -0.5; - _tfPub->sendTransform(ts); - _tfPub->sendTransform(opticalTS); + tfPub->sendTransform(ts); + tfPub->sendTransform(opticalTS); } } -void TFPublisher::publishImuTransform(nlohmann::json json, rclcpp::Node* node) { +void TFPublisher::publishImuTransform(nlohmann::json json, std::shared_ptr node) { geometry_msgs::msg::TransformStamped ts; ts.header.stamp = node->get_clock()->now(); auto imuExtr = json["imuExtrinsics"]; if(imuExtr["toCameraSocket"] != -1) { - ts.header.frame_id = _baseFrame + std::string("_") + getCamSocketName(imuExtr["toCameraSocket"].get()) + std::string("_camera_frame"); + ts.header.frame_id = baseFrame + std::string("_") + getCamSocketName(imuExtr["toCameraSocket"].get()) + std::string("_camera_frame"); } else { - ts.header.frame_id = _baseFrame; + ts.header.frame_id = baseFrame; } - ts.child_frame_id = _baseFrame + std::string("_imu_frame"); + ts.child_frame_id = baseFrame + std::string("_imu_frame"); ts.transform.rotation = quatFromRotM(imuExtr["rotationMatrix"]); ts.transform.translation = transFromExtr(imuExtr["translation"]); bool zeroTrans = ts.transform.translation.x == 0 && ts.transform.translation.y == 0 && ts.transform.translation.z == 0; bool zeroRot = ts.transform.rotation.w == 1 && ts.transform.rotation.x == 0 && ts.transform.rotation.y == 0 && ts.transform.rotation.z == 0; if(zeroTrans || zeroRot) { - RCLCPP_WARN(_logger, "IMU extrinsics appear to be default. Check if the IMU is calibrated."); + RCLCPP_WARN(logger, "IMU extrinsics appear to be default. Check if the IMU is calibrated."); } - _tfPub->sendTransform(ts); + tfPub->sendTransform(ts); } std::string TFPublisher::getCamSocketName(int socketNum) { - return _socketNameMap.at(static_cast(socketNum)); + if(rsCompatibilityMode) { + return rsSocketNameMap.at(static_cast(socketNum)); + } + return socketNameMap.at(static_cast(socketNum)); } geometry_msgs::msg::Vector3 TFPublisher::transFromExtr(nlohmann::json translation) { @@ -165,8 +170,8 @@ bool TFPublisher::modelNameAvailable() { if((dir = opendir(path.c_str())) != NULL) { while((ent = readdir(dir)) != NULL) { auto name = std::string(ent->d_name); - RCLCPP_DEBUG(_logger, "Found model: %s", name.c_str()); - if(name == _camModel + ".stl") { + RCLCPP_DEBUG(logger, "Found model: %s", name.c_str()); + if(name == camModel + ".stl") { return true; } } @@ -178,63 +183,63 @@ bool TFPublisher::modelNameAvailable() { } std::string TFPublisher::prepareXacroArgs() { - if(!_customURDFLocation.empty() || !modelNameAvailable()) { + if(!customURDFLocation.empty() || !modelNameAvailable()) { RCLCPP_ERROR( - _logger, + logger, "Model name %s not found in depthai_descriptions package. If camera model is autodetected, please notify developers. 
Using default model: OAK-D", - _camModel.c_str()); - _camModel = "OAK-D"; + camModel.c_str()); + camModel = "OAK-D"; } - std::string xacroArgs = "camera_name:=" + _camName; - xacroArgs += " camera_model:=" + _camModel; - xacroArgs += " base_frame:=" + _baseFrame; - xacroArgs += " parent_frame:=" + _parentFrame; - xacroArgs += " cam_pos_x:=" + _camPosX; - xacroArgs += " cam_pos_y:=" + _camPosY; - xacroArgs += " cam_pos_z:=" + _camPosZ; - xacroArgs += " cam_roll:=" + _camRoll; - xacroArgs += " cam_pitch:=" + _camPitch; - xacroArgs += " cam_yaw:=" + _camYaw; - xacroArgs += " has_imu:=" + _imuFromDescr; + std::string xacroArgs = "camera_name:=" + camName; + xacroArgs += " camera_model:=" + camModel; + xacroArgs += " base_frame:=" + baseFrame; + xacroArgs += " parent_frame:=" + parentFrame; + xacroArgs += " cam_pos_x:=" + camPosX; + xacroArgs += " cam_pos_y:=" + camPosY; + xacroArgs += " cam_pos_z:=" + camPosZ; + xacroArgs += " cam_roll:=" + camRoll; + xacroArgs += " cam_pitch:=" + camPitch; + xacroArgs += " cam_yaw:=" + camYaw; + xacroArgs += " has_imu:=" + imuFromDescr; return xacroArgs; } void TFPublisher::convertModelName() { - if(_camModel.find("OAK-D-PRO-POE") != std::string::npos || _camModel.find("OAK-D-PRO-W-POE") != std::string::npos - || _camModel.find("OAK-D-S2-POE") != std::string::npos) { - _camModel = "OAK-D-POE"; - } else if(_camModel.find("OAK-D-LITE") != std::string::npos) { - _camModel = "OAK-D-PRO"; - } else if(_camModel.find("OAK-D-S2") != std::string::npos) { - _camModel = "OAK-D-PRO"; - } else if(_camModel.find("OAK-D-PRO-W") != std::string::npos) { - _camModel = "OAK-D-PRO"; - } else if(_camModel.find("OAK-D-PRO") != std::string::npos) { - _camModel = "OAK-D-PRO"; - } else if(_camModel.find("OAK-D-POE")) { - _camModel = "OAK-D-POE"; - } else if(_camModel.find("OAK-D") != std::string::npos) { - _camModel = "OAK-D"; + if(camModel.find("OAK-D-PRO-POE") != std::string::npos || camModel.find("OAK-D-PRO-W-POE") != std::string::npos + || camModel.find("OAK-D-S2-POE") != std::string::npos) { + camModel = "OAK-D-POE"; + } else if(camModel.find("OAK-D-LITE") != std::string::npos) { + camModel = "OAK-D-PRO"; + } else if(camModel.find("OAK-D-S2") != std::string::npos) { + camModel = "OAK-D-PRO"; + } else if(camModel.find("OAK-D-PRO-W") != std::string::npos) { + camModel = "OAK-D-PRO"; + } else if(camModel.find("OAK-D-PRO") != std::string::npos) { + camModel = "OAK-D-PRO"; + } else if(camModel.find("OAK-D-POE")) { + camModel = "OAK-D-POE"; + } else if(camModel.find("OAK-D") != std::string::npos) { + camModel = "OAK-D"; } else { - RCLCPP_WARN(_logger, "Unable to match model name: %s to available model family.", _camModel.c_str()); + RCLCPP_WARN(logger, "Unable to match model name: %s to available model family.", camModel.c_str()); } } std::string TFPublisher::getURDF() { std::string args, path; - if(_customXacroArgs.empty()) { + if(customXacroArgs.empty()) { args = prepareXacroArgs(); } else { - args = _customXacroArgs; + args = customXacroArgs; } - if(_customURDFLocation.empty()) { + if(customURDFLocation.empty()) { path = ament_index_cpp::get_package_share_directory("depthai_descriptions") + "/urdf/base_descr.urdf.xacro "; } else { - path = _customURDFLocation + " "; + path = customURDFLocation + " "; } std::string cmd = "xacro " + path + args; - RCLCPP_DEBUG(_logger, "Xacro command: %s", cmd.c_str()); + RCLCPP_DEBUG(logger, "Xacro command: %s", cmd.c_str()); std::array buffer; std::string result; std::unique_ptr pipe(popen(cmd.c_str(), "r"), pclose); @@ -247,4 +252,4 @@ std::string 
TFPublisher::getURDF() { return result; } } // namespace ros -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/depthai_bridge/src/TrackDetectionConverter.cpp b/depthai_bridge/src/TrackDetectionConverter.cpp new file mode 100644 index 00000000..be29e561 --- /dev/null +++ b/depthai_bridge/src/TrackDetectionConverter.cpp @@ -0,0 +1,94 @@ +#include "depthai_bridge/TrackDetectionConverter.hpp" + +#include "depthai/depthai.hpp" +#include "depthai_bridge/depthaiUtility.hpp" + +namespace dai { + +namespace ros { + +TrackDetectionConverter::TrackDetectionConverter(std::string frameName, int width, int height, bool normalized, float thresh, bool getBaseDeviceTimestamp) + : _frameName(frameName), + _width(width), + _height(height), + _normalized(normalized), + _thresh(thresh), + _steadyBaseTime(std::chrono::steady_clock::now()), + _getBaseDeviceTimestamp(getBaseDeviceTimestamp) { + _rosBaseTime = rclcpp::Clock().now(); +} + +TrackDetectionConverter::~TrackDetectionConverter() = default; + +void TrackDetectionConverter::updateRosBaseTime() { + updateBaseTime(_steadyBaseTime, _rosBaseTime, _totalNsChange); +} + +void TrackDetectionConverter::toRosMsg(std::shared_ptr trackData, std::deque& opDetectionMsgs) { + // setting the header + std::chrono::_V2::steady_clock::time_point tstamp; + if(_getBaseDeviceTimestamp) + tstamp = trackData->getTimestampDevice(); + else + tstamp = trackData->getTimestamp(); + + depthai_ros_msgs::msg::TrackDetection2DArray opDetectionMsg; + opDetectionMsg.header.stamp = getFrameTime(_rosBaseTime, _steadyBaseTime, tstamp); + opDetectionMsg.header.frame_id = _frameName; + opDetectionMsg.detections.resize(trackData->tracklets.size()); + + // publishing + for(int i = 0; i < trackData->tracklets.size(); ++i) { + dai::Tracklet t = trackData->tracklets[i]; + dai::Rect roi; + float xMin, yMin, xMax, yMax; + + if(_normalized) + roi = t.roi; + else + roi = t.roi.denormalize(_width, _height); + + xMin = roi.topLeft().x; + yMin = roi.topLeft().y; + xMax = roi.bottomRight().x; + yMax = roi.bottomRight().y; + + float xSize = xMax - xMin; + float ySize = yMax - yMin; + float xCenter = xMin + xSize / 2.; + float yCenter = yMin + ySize / 2.; + + opDetectionMsg.detections[i].results.resize(1); + + opDetectionMsg.detections[i].results[0].hypothesis.class_id = std::to_string(t.label); + opDetectionMsg.detections[i].results[0].hypothesis.score = _thresh; + + opDetectionMsg.detections[i].bbox.center.position.x = xCenter; + opDetectionMsg.detections[i].bbox.center.position.y = yCenter; + opDetectionMsg.detections[i].bbox.size_x = xSize; + opDetectionMsg.detections[i].bbox.size_y = ySize; + + opDetectionMsg.detections[i].is_tracking = true; + std::stringstream track_id_str; + track_id_str << "" << t.id; + opDetectionMsg.detections[i].tracking_id = track_id_str.str(); + opDetectionMsg.detections[i].tracking_age = t.age; + opDetectionMsg.detections[i].tracking_status = static_cast(t.status); + } + + opDetectionMsgs.push_back(opDetectionMsg); +} + +depthai_ros_msgs::msg::TrackDetection2DArray::SharedPtr TrackDetectionConverter::toRosMsgPtr(std::shared_ptr trackData) { + std::deque msgQueue; + toRosMsg(trackData, msgQueue); + auto msg = msgQueue.front(); + + depthai_ros_msgs::msg::TrackDetection2DArray::SharedPtr ptr = std::make_shared(msg); + + return ptr; +} + +} // namespace ros + +} // namespace dai \ No newline at end of file diff --git a/depthai_bridge/src/TrackSpatialDetectionConverter.cpp b/depthai_bridge/src/TrackSpatialDetectionConverter.cpp new file mode 100644 
index 00000000..7b29942a --- /dev/null +++ b/depthai_bridge/src/TrackSpatialDetectionConverter.cpp @@ -0,0 +1,101 @@ +#include "depthai_bridge/TrackSpatialDetectionConverter.hpp" + +#include "depthai/depthai.hpp" +#include "depthai_bridge/depthaiUtility.hpp" + +namespace dai { + +namespace ros { + +TrackSpatialDetectionConverter::TrackSpatialDetectionConverter( + std::string frameName, int width, int height, bool normalized, float thresh, bool getBaseDeviceTimestamp) + : _frameName(frameName), + _width(width), + _height(height), + _normalized(normalized), + _thresh(thresh), + _steadyBaseTime(std::chrono::steady_clock::now()), + _getBaseDeviceTimestamp(getBaseDeviceTimestamp) { + _rosBaseTime = rclcpp::Clock().now(); +} + +TrackSpatialDetectionConverter::~TrackSpatialDetectionConverter() = default; + +void TrackSpatialDetectionConverter::updateRosBaseTime() { + updateBaseTime(_steadyBaseTime, _rosBaseTime, _totalNsChange); +} + +void TrackSpatialDetectionConverter::toRosMsg(std::shared_ptr trackData, + std::deque& opDetectionMsgs) { + // setting the header + std::chrono::_V2::steady_clock::time_point tstamp; + if(_getBaseDeviceTimestamp) + tstamp = trackData->getTimestampDevice(); + else + tstamp = trackData->getTimestamp(); + + depthai_ros_msgs::msg::TrackDetection2DArray opDetectionMsg; + opDetectionMsg.header.stamp = getFrameTime(_rosBaseTime, _steadyBaseTime, tstamp); + opDetectionMsg.header.frame_id = _frameName; + opDetectionMsg.detections.resize(trackData->tracklets.size()); + + // publishing + for(int i = 0; i < trackData->tracklets.size(); ++i) { + dai::Tracklet t = trackData->tracklets[i]; + dai::Rect roi; + float xMin, yMin, xMax, yMax; + + if(_normalized) + roi = t.roi; + else + roi = t.roi.denormalize(_width, _height); + + xMin = roi.topLeft().x; + yMin = roi.topLeft().y; + xMax = roi.bottomRight().x; + yMax = roi.bottomRight().y; + + float xSize = xMax - xMin; + float ySize = yMax - yMin; + float xCenter = xMin + xSize / 2.; + float yCenter = yMin + ySize / 2.; + + opDetectionMsg.detections[i].results.resize(1); + + opDetectionMsg.detections[i].results[0].hypothesis.class_id = std::to_string(t.label); + opDetectionMsg.detections[i].results[0].hypothesis.score = _thresh; + + opDetectionMsg.detections[i].bbox.center.position.x = xCenter; + opDetectionMsg.detections[i].bbox.center.position.y = yCenter; + opDetectionMsg.detections[i].bbox.size_x = xSize; + opDetectionMsg.detections[i].bbox.size_y = ySize; + + opDetectionMsg.detections[i].is_tracking = true; + std::stringstream track_id_str; + track_id_str << "" << t.id; + opDetectionMsg.detections[i].tracking_id = track_id_str.str(); + opDetectionMsg.detections[i].tracking_age = t.age; + opDetectionMsg.detections[i].tracking_status = static_cast(t.status); + + // converting mm to meters since per ROS REP-103 length should always be in meters + opDetectionMsg.detections[i].results[0].pose.pose.position.x = t.spatialCoordinates.x / 1000.0; + opDetectionMsg.detections[i].results[0].pose.pose.position.y = t.spatialCoordinates.y / 1000.0; + opDetectionMsg.detections[i].results[0].pose.pose.position.z = t.spatialCoordinates.z / 1000.0; + } + + opDetectionMsgs.push_back(opDetectionMsg); +} + +depthai_ros_msgs::msg::TrackDetection2DArray::SharedPtr TrackSpatialDetectionConverter::toRosMsgPtr(std::shared_ptr trackData) { + std::deque msgQueue; + toRosMsg(trackData, msgQueue); + auto msg = msgQueue.front(); + + depthai_ros_msgs::msg::TrackDetection2DArray::SharedPtr ptr = std::make_shared(msg); + + return ptr; +} + +} // namespace ros + +namespace rosBridge = ros; + +}
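// A minimal usage sketch for the new tracker converters (editorial illustration, not
// part of the patch; `trackletQueue` and `trackPub` are assumed to exist, while the
// converter API is as declared in the headers above):
//
//   dai::ros::TrackSpatialDetectionConverter converter("oak_rgb_camera_optical_frame", 416, 416, false, 0.5);
//   std::deque<depthai_ros_msgs::msg::TrackDetection2DArray> msgs;
//   converter.toRosMsg(trackletQueue->get<dai::Tracklets>(), msgs);  // trackletQueue: ObjectTracker output queue
//   for(const auto& m : msgs) trackPub->publish(m);                  // trackPub: rclcpp publisher of TrackDetection2DArray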
// namespace dai \ No newline at end of file diff --git a/depthai_bridge/src/TrackedFeaturesConverter.cpp b/depthai_bridge/src/TrackedFeaturesConverter.cpp index cb9e40c1..313f852f 100644 --- a/depthai_bridge/src/TrackedFeaturesConverter.cpp +++ b/depthai_bridge/src/TrackedFeaturesConverter.cpp @@ -31,7 +31,7 @@ void TrackedFeaturesConverter::toRosMsg(std::shared_ptr in msg.header.stamp = getFrameTime(_rosBaseTime, _steadyBaseTime, tstamp); msg.header.frame_id = _frameName; - msg.features.resize(inFeatures->trackedFeatures.size()); + msg.features.reserve(inFeatures->trackedFeatures.size()); for(const auto& feature : inFeatures->trackedFeatures) { depthai_ros_msgs::msg::TrackedFeature ft; @@ -48,4 +48,4 @@ void TrackedFeaturesConverter::toRosMsg(std::shared_ptr in } } // namespace ros -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/depthai_descriptions/CMakeLists.txt b/depthai_descriptions/CMakeLists.txt index 7ac41a95..7cbf15a2 100644 --- a/depthai_descriptions/CMakeLists.txt +++ b/depthai_descriptions/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.8) -project(depthai_descriptions VERSION 2.9.0) +project(depthai_descriptions VERSION 2.10.0) if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wall -Wextra -Wpedantic) @@ -15,4 +15,8 @@ install(DIRECTORY launch/ DESTINATION share/${PROJECT_NAME}/launch FILES_MATCHING PATTERN "*.py") install(DIRECTORY urdf DESTINATION share/${PROJECT_NAME}) + +ament_environment_hooks("${CMAKE_CURRENT_SOURCE_DIR}/env-hooks/${PROJECT_NAME}.sh.in") +ament_environment_hooks("${CMAKE_CURRENT_SOURCE_DIR}/env-hooks/${PROJECT_NAME}.dsv.in") + ament_package() diff --git a/depthai_descriptions/env-hooks/depthai_descriptions.dsv.in b/depthai_descriptions/env-hooks/depthai_descriptions.dsv.in new file mode 100644 index 00000000..17b3b11c --- /dev/null +++ b/depthai_descriptions/env-hooks/depthai_descriptions.dsv.in @@ -0,0 +1,4 @@ +prepend-non-duplicate;GAZEBO_MODEL_PATH;share +prepend-non-duplicate;GAZEBO_RESOURCE_PATH;share +prepend-non-duplicate;IGN_GAZEBO_MODEL_PATH;share +prepend-non-duplicate;IGN_GAZEBO_RESOURCE_PATH;share diff --git a/depthai_descriptions/env-hooks/depthai_descriptions.sh.in b/depthai_descriptions/env-hooks/depthai_descriptions.sh.in new file mode 100644 index 00000000..f8b034f2 --- /dev/null +++ b/depthai_descriptions/env-hooks/depthai_descriptions.sh.in @@ -0,0 +1,5 @@ + +ament_prepend_unique_value GAZEBO_MODEL_PATH "$AMENT_CURRENT_PREFIX" +ament_prepend_unique_value GAZEBO_RESOURCE_PATH "$AMENT_CURRENT_PREFIX" +ament_prepend_unique_value IGN_GAZEBO_MODEL_PATH "$AMENT_CURRENT_PREFIX" +ament_prepend_unique_value IGN_GAZEBO_RESOURCE_PATH "$AMENT_CURRENT_PREFIX" diff --git a/depthai_descriptions/launch/urdf_launch.py b/depthai_descriptions/launch/urdf_launch.py index 2ce03ede..a35d80ce 100644 --- a/depthai_descriptions/launch/urdf_launch.py +++ b/depthai_descriptions/launch/urdf_launch.py @@ -1,3 +1,6 @@ +import os + +from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription from launch.actions import DeclareLaunchArgument, OpaqueFunction from launch_ros.actions import Node, LoadComposableNodes @@ -84,54 +87,55 @@ def launch_setup(context, *args, **kwargs): def generate_launch_description(): declared_arguments = [ DeclareLaunchArgument( - 'namespace', - default_value='', - description='Specifies the namespace of the robot state publisher node. 
Default value will be ""'), + "namespace", + default_value="", + description='Specifies the namespace of the robot state publisher node. Default value will be ""', + ), DeclareLaunchArgument( - 'camera_model', - default_value='OAK-D', - description='The model of the camera. Using a wrong camera model can disable camera features. Valid models: `OAK-D, OAK-D-LITE`.'), - + "camera_model", + default_value="OAK-D", + description="The model of the camera. Using a wrong camera model can disable camera features. Valid models: `OAK-D, OAK-D-LITE`.", + ), DeclareLaunchArgument( - 'tf_prefix', - default_value='oak', - description='The name of the camera. It can be different from the camera model and it will be used as node `namespace`.'), - + "tf_prefix", + default_value="oak", + description="The name of the camera. It can be different from the camera model and it will be used as node `namespace`.", + ), DeclareLaunchArgument( - 'base_frame', - default_value='oak-d_frame', - description='Name of the base link.'), - + "base_frame", + default_value="oak-d_frame", + description="Name of the base link.", + ), DeclareLaunchArgument( - 'parent_frame', - default_value='oak-d-base-frame', - description='Name of the parent link from other a robot TF for example that can be connected to the base of the OAK.'), - + "parent_frame", + default_value="oak-d-base-frame", + description="Name of the parent link from other a robot TF for example that can be connected to the base of the OAK.", + ), DeclareLaunchArgument( - 'cam_pos_x', - default_value='0.0', - description='Position X of the camera with respect to the base frame.'), - + "cam_pos_x", + default_value="0.0", + description="Position X of the camera with respect to the base frame.", + ), DeclareLaunchArgument( - 'cam_pos_y', - default_value='0.0', - description='Position Y of the camera with respect to the base frame.'), - + "cam_pos_y", + default_value="0.0", + description="Position Y of the camera with respect to the base frame.", + ), DeclareLaunchArgument( - 'cam_pos_z', - default_value='0.0', - description='Position Z of the camera with respect to the base frame.'), - + "cam_pos_z", + default_value="0.0", + description="Position Z of the camera with respect to the base frame.", + ), DeclareLaunchArgument( - 'cam_roll', - default_value='0.0', - description='Roll orientation of the camera with respect to the base frame.'), - + "cam_roll", + default_value="0.0", + description="Roll orientation of the camera with respect to the base frame.", + ), DeclareLaunchArgument( - 'cam_pitch', - default_value='0.0', - description='Pitch orientation of the camera with respect to the base frame.'), - + "cam_pitch", + default_value="0.0", + description="Pitch orientation of the camera with respect to the base frame.", + ), DeclareLaunchArgument( 'cam_yaw', default_value='0.0', diff --git a/depthai_descriptions/package.xml b/depthai_descriptions/package.xml index 5e4251b4..3f3eeff2 100644 --- a/depthai_descriptions/package.xml +++ b/depthai_descriptions/package.xml @@ -1,10 +1,9 @@ depthai_descriptions - 2.9.0 + 2.10.0 The depthai_descriptions package - Sachin Guruswamy Adam Serafin Adam Serafin diff --git a/depthai_descriptions/urdf/base_descr.urdf.xacro b/depthai_descriptions/urdf/base_descr.urdf.xacro index e5d04601..47190853 100644 --- a/depthai_descriptions/urdf/base_descr.urdf.xacro +++ b/depthai_descriptions/urdf/base_descr.urdf.xacro @@ -1,21 +1,25 @@ - - - - - - - - - - - + + + + + + + + + + + - + - - + + diff --git 
a/depthai_descriptions/urdf/depthai_descr.urdf.xacro b/depthai_descriptions/urdf/depthai_descr.urdf.xacro index 9fba2d7e..39e9fac3 100644 --- a/depthai_descriptions/urdf/depthai_descr.urdf.xacro +++ b/depthai_descriptions/urdf/depthai_descr.urdf.xacro @@ -1,20 +1,25 @@ - - - - - - - - - - + + + + + + + + + + + - + - - + + diff --git a/depthai_descriptions/urdf/include/base_macro.urdf.xacro b/depthai_descriptions/urdf/include/base_macro.urdf.xacro index e46e3bca..93a73660 100644 --- a/depthai_descriptions/urdf/include/base_macro.urdf.xacro +++ b/depthai_descriptions/urdf/include/base_macro.urdf.xacro @@ -1,78 +1,110 @@ + xmlns:xacro="http://ros.org/wiki/xacro"> - - - - - + cam_roll cam_pitch cam_yaw has_imu r:=0.8 g:=0.8 b:=0.8 a:=0.8 simulation:=false"> - - - - - + + + + + + - - - - - - - - - - - - + + + + - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + + + + + - - - - - - - - - - - - - - - - + - - - - - - - - + + + + + + + + - - - - - - - - + + + + + + + + - - \ No newline at end of file + + + + + + + + + + + + + + + + + + + + diff --git a/depthai_descriptions/urdf/include/depthai_macro.urdf.xacro b/depthai_descriptions/urdf/include/depthai_macro.urdf.xacro index 5eda7e07..45d16958 100644 --- a/depthai_descriptions/urdf/include/depthai_macro.urdf.xacro +++ b/depthai_descriptions/urdf/include/depthai_macro.urdf.xacro @@ -1,120 +1,177 @@ + xmlns:xacro="http://ros.org/wiki/xacro"> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file + cam_roll cam_pitch cam_yaw r:=0.8 g:=0.8 b:=0.8 a:=0.8 simulation:=false rs_compat:=false"> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/depthai_descriptions/urdf/models/OAK-D-SR-POE.stl b/depthai_descriptions/urdf/models/OAK-D-SR-POE.stl new file mode 100644 index 00000000..fd72d704 Binary files /dev/null and b/depthai_descriptions/urdf/models/OAK-D-SR-POE.stl differ diff --git a/depthai_examples/CMakeLists.txt b/depthai_examples/CMakeLists.txt index d149e2e1..3505ea40 100644 --- a/depthai_examples/CMakeLists.txt +++ b/depthai_examples/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.10.2) # CMake version in Ubuntu 18.04 LTS -project(depthai_examples VERSION 2.9.0 LANGUAGES CXX C) +project(depthai_examples VERSION 2.10.0 LANGUAGES CXX C) set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) @@ -102,11 +102,15 @@ dai_add_node_ros2(feature_tracker src/feature_tracker_publisher.cpp) dai_add_node_ros2(stereo_node src/stereo_publisher.cpp) dai_add_node_ros2(yolov4_spatial_node src/yolov4_spatial_publisher.cpp) dai_add_node_ros2(yolov4_node src/yolov4_publisher.cpp) +dai_add_node_ros2(tracker_yolov4_node src/tracker_yolov4_publisher.cpp) +dai_add_node_ros2(tracker_yolov4_spatial_node src/tracker_yolov4_spatial_publisher.cpp) target_compile_definitions(mobilenet_node PRIVATE BLOB_NAME="${mobilenet_blob_name}") target_compile_definitions(yolov4_spatial_node PRIVATE BLOB_NAME="${tiny_yolo_v4_blob_name}") target_compile_definitions(yolov4_node PRIVATE BLOB_NAME="${tiny_yolo_v4_blob_name}") 
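# BLOB_NAME bakes the default neural-network blob filename into each node, resolved
# under the example resources directory at runtime (same convention as the existing
# yolov4 nodes above).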
target_compile_definitions(stereo_inertial_node PRIVATE BLOB_NAME="${tiny_yolo_v4_blob_name}") +target_compile_definitions(tracker_yolov4_node PRIVATE BLOB_NAME="${tiny_yolo_v4_blob_name}") +target_compile_definitions(tracker_yolov4_spatial_node PRIVATE BLOB_NAME="${tiny_yolo_v4_blob_name}") if($ENV{ROS_DISTRO} STREQUAL "galactic") target_compile_definitions(rgb_stereo_node PRIVATE IS_GALACTIC) @@ -128,6 +132,8 @@ install(TARGETS yolov4_spatial_node yolov4_node feature_tracker + tracker_yolov4_node + tracker_yolov4_spatial_node DESTINATION lib/${PROJECT_NAME}) ament_package() diff --git a/depthai_examples/launch/rgb_stereo_node.launch.py b/depthai_examples/launch/rgb_stereo_node.launch.py index 54115119..ee74a98c 100644 --- a/depthai_examples/launch/rgb_stereo_node.launch.py +++ b/depthai_examples/launch/rgb_stereo_node.launch.py @@ -42,8 +42,8 @@ def generate_launch_description(): previewHeight = LaunchConfiguration('previewHeight', default = 300) # IR Brightness. OAK-D-Pro only. - dotProjectormA = LaunchConfiguration('dotProjectormA', default = 0.0) - floodLightmA = LaunchConfiguration('floodLightmA', default = 0.0) + dotProjectorIntensity = LaunchConfiguration('dotProjectorIntensity', default = 0.0) + floodLightIntensity = LaunchConfiguration('floodLightIntensity', default = 0.0) declare_camera_model_cmd = DeclareLaunchArgument( 'camera_model', @@ -156,14 +156,14 @@ def generate_launch_description(): default_value=previewHeight, description='Height of preview image') - declare_dotProjectormA_cmd = DeclareLaunchArgument( - 'dotProjectormA', - default_value=dotProjectormA, + declare_dotProjectorIntensity_cmd = DeclareLaunchArgument( + 'dotProjectorIntensity', + default_value=dotProjectorIntensity, description='Brightness of IR Dot projector on OAK-D-Pro in mA.') - declare_floodLightmA_cmd = DeclareLaunchArgument( - 'floodLightmA', - default_value=floodLightmA, + declare_floodLightIntensity_cmd = DeclareLaunchArgument( + 'floodLightIntensity', + default_value=floodLightIntensity, description='Brightness of IR Flood LED on OAK-D-Pro in mA.') urdf_launch = IncludeLaunchDescription( @@ -196,8 +196,8 @@ def generate_launch_description(): {'useDepth': useDepth}, {'previewWidth': previewWidth}, {'previewHeight': previewHeight}, - {'dotProjectormA': dotProjectormA}, - {'floodLightmA': floodLightmA}]) + {'dotProjectorIntensity': dotProjectorIntensity}, + {'floodLightIntensity': floodLightIntensity}]) metric_converter_node = launch_ros.actions.ComposableNodeContainer( name='container', @@ -270,8 +270,8 @@ def generate_launch_description(): ld.add_action(declare_previewWidth_cmd) ld.add_action(declare_previewHeight_cmd) - ld.add_action(declare_dotProjectormA_cmd) - ld.add_action(declare_floodLightmA_cmd) + ld.add_action(declare_dotProjectorIntensity_cmd) + ld.add_action(declare_floodLightIntensity_cmd) ld.add_action(rgb_stereo_node) ld.add_action(urdf_launch) diff --git a/depthai_examples/launch/stereo_inertial_node.launch.py b/depthai_examples/launch/stereo_inertial_node.launch.py index e6febeeb..819a7db4 100644 --- a/depthai_examples/launch/stereo_inertial_node.launch.py +++ b/depthai_examples/launch/stereo_inertial_node.launch.py @@ -73,8 +73,8 @@ def generate_launch_description(): enableDotProjector = LaunchConfiguration('enableDotProjector', default = False) enableFloodLight = LaunchConfiguration('enableFloodLight', default = False) - dotProjectormA = LaunchConfiguration('dotProjectormA', default = 200.0) - floodLightmA = LaunchConfiguration('floodLightmA', default = 200.0) + 
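+ # NOTE: these replace the old dotProjectormA/floodLightmA arguments. The IR driver is now
+ # controlled through a normalized intensity in the 0.0-1.0 range instead of a current in mA.
+ # Hypothetical example invocation:
+ #   ros2 launch depthai_examples stereo_inertial_node.launch.py \
+ #     enableDotProjector:=true dotProjectorIntensity:=0.7 floodLightIntensity:=0.2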
dotProjectorIntensity = LaunchConfiguration('dotProjectorIntensity', default = 0.5) + floodLightIntensity = LaunchConfiguration('floodLightIntensity', default = 0.5) enableRosBaseTimeUpdate = LaunchConfiguration('enableRosBaseTimeUpdate', default = False) enableRviz = LaunchConfiguration('enableRviz', default = True) @@ -286,15 +286,15 @@ def generate_launch_description(): default_value=enableFloodLight, description='Set this to true to enable the flood light for night vision (Available only on Pro models).') - declare_dotProjectormA_cmd = DeclareLaunchArgument( - 'dotProjectormA', - default_value=dotProjectormA, - description='Set the mA at which you intend to drive the dotProjector. Default is set to 200mA.') - - declare_floodLightmA_cmd = DeclareLaunchArgument( - 'floodLightmA', - default_value=floodLightmA, - description='Set the mA at which you intend to drive the FloodLight. Default is set to 200mA.') + declare_dotProjectorIntensity_cmd = DeclareLaunchArgument( + 'dotProjectorIntensity', + default_value=dotProjectorIntensity, + description='Set the intensity at which you intend to drive the dotProjector, normalized between 0 and 1. Default is set to 0.5.') + + declare_floodLightIntensity_cmd = DeclareLaunchArgument( + 'floodLightIntensity', + default_value=floodLightIntensity, + description='Set the intensity at which you intend to drive the FloodLight, normalized between 0 and 1. Default is set to 0.5.') declare_enableRosBaseTimeUpdate_cmd = DeclareLaunchArgument( 'enableRosBaseTimeUpdate', default_value=enableRosBaseTimeUpdate, @@ -364,8 +364,8 @@ def generate_launch_description(): {'enableDotProjector': enableDotProjector}, {'enableFloodLight': enableFloodLight}, - {'dotProjectormA': dotProjectormA}, - {'floodLightmA': floodLightmA}, + {'dotProjectorIntensity': dotProjectorIntensity}, + {'floodLightIntensity': floodLightIntensity}, {'enableRosBaseTimeUpdate': enableRosBaseTimeUpdate} ]) @@ -484,8 +484,8 @@ def generate_launch_description(): ld.add_action(declare_enableDotProjector_cmd) ld.add_action(declare_enableFloodLight_cmd) - ld.add_action(declare_dotProjectormA_cmd) - ld.add_action(declare_floodLightmA_cmd) + ld.add_action(declare_dotProjectorIntensity_cmd) + ld.add_action(declare_floodLightIntensity_cmd) ld.add_action(declare_enableRviz_cmd) diff --git a/depthai_examples/launch/tracker_yolov4_node.launch.py b/depthai_examples/launch/tracker_yolov4_node.launch.py new file mode 100644 index 00000000..4a82b35b --- /dev/null +++ b/depthai_examples/launch/tracker_yolov4_node.launch.py @@ -0,0 +1,159 @@ +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription, launch_description_sources +from launch.actions import IncludeLaunchDescription +from launch.actions import DeclareLaunchArgument +from launch.substitutions import LaunchConfiguration +import launch_ros.actions +import launch_ros.descriptions + + +def generate_launch_description(): + depthai_examples_path = get_package_share_directory('depthai_examples') + + default_rviz = os.path.join(depthai_examples_path, + 'rviz', 'pointCloud.rviz') + urdf_launch_dir = os.path.join(get_package_share_directory('depthai_descriptions'), 'launch') + default_resources_path = os.path.join(depthai_examples_path, + 'resources') + print('Default resources path..............') + print(default_resources_path) + camera_model = LaunchConfiguration('camera_model', default = 'OAK-D') + tf_prefix = LaunchConfiguration('tf_prefix', default = 'oak') + base_frame = LaunchConfiguration('base_frame', default = 'oak-d_frame') + parent_frame =
LaunchConfiguration('parent_frame', default = 'oak-d-base-frame') + + cam_pos_x = LaunchConfiguration('cam_pos_x', default = '0.0') + cam_pos_y = LaunchConfiguration('cam_pos_y', default = '0.0') + cam_pos_z = LaunchConfiguration('cam_pos_z', default = '0.0') + cam_roll = LaunchConfiguration('cam_roll', default = '0.0') + cam_pitch = LaunchConfiguration('cam_pitch', default = '0.0') + cam_yaw = LaunchConfiguration('cam_yaw', default = '0.0') + + camera_param_uri = LaunchConfiguration('camera_param_uri', default = 'package://depthai_examples/params/camera') + nnName = LaunchConfiguration('nnName', default = "x") + resourceBaseFolder = LaunchConfiguration('resourceBaseFolder', default = default_resources_path) + fullFrameTracking = LaunchConfiguration('fullFrameTracking', default = False) + + declare_camera_model_cmd = DeclareLaunchArgument( + 'camera_model', + default_value=camera_model, + description='The model of the camera. Using a wrong camera model can disable camera features. Valid models: `OAK-D, OAK-D-LITE`.') + + declare_tf_prefix_cmd = DeclareLaunchArgument( + 'tf_prefix', + default_value=tf_prefix, + description='The name of the camera. It can be different from the camera model and it will be used in naming TF.') + + declare_base_frame_cmd = DeclareLaunchArgument( + 'base_frame', + default_value=base_frame, + description='Name of the base link.') + + declare_parent_frame_cmd = DeclareLaunchArgument( + 'parent_frame', + default_value=parent_frame, + description='Name of the parent link from other a robot TF for example that can be connected to the base of the OAK.') + + declare_pos_x_cmd = DeclareLaunchArgument( + 'cam_pos_x', + default_value=cam_pos_x, + description='Position X of the camera with respect to the base frame.') + + declare_pos_y_cmd = DeclareLaunchArgument( + 'cam_pos_y', + default_value=cam_pos_y, + description='Position Y of the camera with respect to the base frame.') + + declare_pos_z_cmd = DeclareLaunchArgument( + 'cam_pos_z', + default_value=cam_pos_z, + description='Position Z of the camera with respect to the base frame.') + + declare_roll_cmd = DeclareLaunchArgument( + 'cam_roll', + default_value=cam_roll, + description='Roll orientation of the camera with respect to the base frame.') + + declare_pitch_cmd = DeclareLaunchArgument( + 'cam_pitch', + default_value=cam_pitch, + description='Pitch orientation of the camera with respect to the base frame.') + + declare_yaw_cmd = DeclareLaunchArgument( + 'cam_yaw', + default_value=cam_yaw, + description='Yaw orientation of the camera with respect to the base frame.') + + declare_camera_param_uri_cmd = DeclareLaunchArgument( + 'camera_param_uri', + default_value=camera_param_uri, + description='Sending camera yaml path') + + declare_nnName_cmd = DeclareLaunchArgument( + 'nnName', + default_value=nnName, + description='Path to the object detection blob needed for detection') + + declare_resourceBaseFolder_cmd = DeclareLaunchArgument( + 'resourceBaseFolder', + default_value=resourceBaseFolder, + description='Path to the resources folder which contains the default blobs for the network') + + declare_fullFrameTracking_cmd = DeclareLaunchArgument( + 'fullFrameTracking', + default_value=fullFrameTracking, + description='Set to true for tracking in entire frame') + + urdf_launch = IncludeLaunchDescription( + launch_description_sources.PythonLaunchDescriptionSource( + os.path.join(urdf_launch_dir, 'urdf_launch.py')), + launch_arguments={'tf_prefix' : tf_prefix, + 'camera_model': camera_model, + 'base_frame' : base_frame, + 
'parent_frame': parent_frame, + 'cam_pos_x' : cam_pos_x, + 'cam_pos_y' : cam_pos_y, + 'cam_pos_z' : cam_pos_z, + 'cam_roll' : cam_roll, + 'cam_pitch' : cam_pitch, + 'cam_yaw' : cam_yaw}.items()) + + tracker_yolov4_node = launch_ros.actions.Node( + package='depthai_examples', executable='tracker_yolov4_node', + output='screen', + parameters=[{'tf_prefix': tf_prefix}, + {'camera_param_uri': camera_param_uri}, + {'nnName': nnName}, + {'resourceBaseFolder': resourceBaseFolder}, + {'fullFrameTracking': fullFrameTracking}]) + + rviz_node = launch_ros.actions.Node( + package='rviz2', executable='rviz2', output='screen', + arguments=['--display-config', default_rviz]) + + ld = LaunchDescription() + ld.add_action(declare_tf_prefix_cmd) + ld.add_action(declare_camera_model_cmd) + + ld.add_action(declare_base_frame_cmd) + ld.add_action(declare_parent_frame_cmd) + + ld.add_action(declare_pos_x_cmd) + ld.add_action(declare_pos_y_cmd) + ld.add_action(declare_pos_z_cmd) + ld.add_action(declare_roll_cmd) + ld.add_action(declare_pitch_cmd) + ld.add_action(declare_yaw_cmd) + + ld.add_action(declare_camera_param_uri_cmd) + ld.add_action(declare_nnName_cmd) + ld.add_action(declare_resourceBaseFolder_cmd) + ld.add_action(declare_fullFrameTracking_cmd) + + ld.add_action(tracker_yolov4_node) + #ld.add_action(urdf_launch) + + return ld + diff --git a/depthai_examples/launch/tracker_yolov4_spatial_node.launch.py b/depthai_examples/launch/tracker_yolov4_spatial_node.launch.py new file mode 100644 index 00000000..2a94c22f --- /dev/null +++ b/depthai_examples/launch/tracker_yolov4_spatial_node.launch.py @@ -0,0 +1,196 @@ +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription, launch_description_sources +from launch.actions import IncludeLaunchDescription +from launch.actions import DeclareLaunchArgument +from launch.substitutions import LaunchConfiguration +import launch_ros.actions +import launch_ros.descriptions + + +def generate_launch_description(): + depthai_examples_path = get_package_share_directory('depthai_examples') + + default_rviz = os.path.join(depthai_examples_path, + 'rviz', 'pointCloud.rviz') + urdf_launch_dir = os.path.join(get_package_share_directory('depthai_descriptions'), 'launch') + default_resources_path = os.path.join(depthai_examples_path, + 'resources') + print('Default resources path..............') + print(default_resources_path) + camera_model = LaunchConfiguration('camera_model', default = 'OAK-D') + tf_prefix = LaunchConfiguration('tf_prefix', default = 'oak') + base_frame = LaunchConfiguration('base_frame', default = 'oak-d_frame') + parent_frame = LaunchConfiguration('parent_frame', default = 'oak-d-base-frame') + + cam_pos_x = LaunchConfiguration('cam_pos_x', default = '0.0') + cam_pos_y = LaunchConfiguration('cam_pos_y', default = '0.0') + cam_pos_z = LaunchConfiguration('cam_pos_z', default = '0.0') + cam_roll = LaunchConfiguration('cam_roll', default = '0.0') + cam_pitch = LaunchConfiguration('cam_pitch', default = '0.0') + cam_yaw = LaunchConfiguration('cam_yaw', default = '0.0') + + camera_param_uri = LaunchConfiguration('camera_param_uri', default = 'package://depthai_examples/params/camera') + sync_nn = LaunchConfiguration('sync_nn', default = True) + subpixel = LaunchConfiguration('subpixel', default = True) + nnName = LaunchConfiguration('nnName', default = "x") + resourceBaseFolder = LaunchConfiguration('resourceBaseFolder', default = default_resources_path) + confidence = LaunchConfiguration('confidence', 
default = 200) + lrCheckTresh = LaunchConfiguration('lrCheckTresh', default = 5) + monoResolution = LaunchConfiguration('monoResolution', default = '400p') + fullFrameTracking = LaunchConfiguration('fullFrameTracking', default = False) + + declare_camera_model_cmd = DeclareLaunchArgument( + 'camera_model', + default_value=camera_model, + description='The model of the camera. Using a wrong camera model can disable camera features. Valid models: `OAK-D, OAK-D-LITE`.') + + declare_tf_prefix_cmd = DeclareLaunchArgument( + 'tf_prefix', + default_value=tf_prefix, + description='The name of the camera. It can be different from the camera model and it will be used in naming TF.') + + declare_base_frame_cmd = DeclareLaunchArgument( + 'base_frame', + default_value=base_frame, + description='Name of the base link.') + + declare_parent_frame_cmd = DeclareLaunchArgument( + 'parent_frame', + default_value=parent_frame, + description='Name of a parent link in an existing robot TF tree to which the base of the OAK can be attached.') + + declare_pos_x_cmd = DeclareLaunchArgument( + 'cam_pos_x', + default_value=cam_pos_x, + description='Position X of the camera with respect to the base frame.') + + declare_pos_y_cmd = DeclareLaunchArgument( + 'cam_pos_y', + default_value=cam_pos_y, + description='Position Y of the camera with respect to the base frame.') + + declare_pos_z_cmd = DeclareLaunchArgument( + 'cam_pos_z', + default_value=cam_pos_z, + description='Position Z of the camera with respect to the base frame.') + + declare_roll_cmd = DeclareLaunchArgument( + 'cam_roll', + default_value=cam_roll, + description='Roll orientation of the camera with respect to the base frame.') + + declare_pitch_cmd = DeclareLaunchArgument( + 'cam_pitch', + default_value=cam_pitch, + description='Pitch orientation of the camera with respect to the base frame.') + + declare_yaw_cmd = DeclareLaunchArgument( + 'cam_yaw', + default_value=cam_yaw, + description='Yaw orientation of the camera with respect to the base frame.') + + declare_camera_param_uri_cmd = DeclareLaunchArgument( + 'camera_param_uri', + default_value=camera_param_uri, + description='URI of the folder containing the camera YAML parameter files (e.g. a package:// URI).') + + declare_sync_nn_cmd = DeclareLaunchArgument( + 'sync_nn', + default_value=sync_nn, + description='Syncs the image output with the Detection.') + + declare_subpixel_cmd = DeclareLaunchArgument( + 'subpixel', + default_value=subpixel, + description='Enables subpixel stereo detection.') + + declare_nnName_cmd = DeclareLaunchArgument( + 'nnName', + default_value=nnName, + description='Path to the object detection blob needed for detection') + + declare_resourceBaseFolder_cmd = DeclareLaunchArgument( + 'resourceBaseFolder', + default_value=resourceBaseFolder, + description='Path to the resources folder which contains the default blobs for the network') + + declare_confidence_cmd = DeclareLaunchArgument( + 'confidence', + default_value=confidence, + description='Confidence that the disparity from the feature matching was good. 0-255. 255 being the lowest confidence.') + + declare_lrCheckTresh_cmd = DeclareLaunchArgument( + 'lrCheckTresh', + default_value=lrCheckTresh, + description='Left-right check threshold: how far apart the disparity values computed in the l->r and r->l directions may be for a pixel to still be accepted. Default is 5.') + + declare_monoResolution_cmd = DeclareLaunchArgument( + 'monoResolution', + default_value=monoResolution, + description='Contains the resolution of the Mono Cameras. 
Available resolutions are 800p, 720p & 400p for OAK-D & 480p for OAK-D-Lite.') + + declare_fullFrameTracking_cmd = DeclareLaunchArgument( + 'fullFrameTracking', + default_value=fullFrameTracking, + description='Set to true for tracking in entire frame') + + urdf_launch = IncludeLaunchDescription( + launch_description_sources.PythonLaunchDescriptionSource( + os.path.join(urdf_launch_dir, 'urdf_launch.py')), + launch_arguments={'tf_prefix' : tf_prefix, + 'camera_model': camera_model, + 'base_frame' : base_frame, + 'parent_frame': parent_frame, + 'cam_pos_x' : cam_pos_x, + 'cam_pos_y' : cam_pos_y, + 'cam_pos_z' : cam_pos_z, + 'cam_roll' : cam_roll, + 'cam_pitch' : cam_pitch, + 'cam_yaw' : cam_yaw}.items()) + + tracker_yolov4_spatial_node = launch_ros.actions.Node( + package='depthai_examples', executable='tracker_yolov4_spatial_node', + output='screen', + parameters=[{'tf_prefix': tf_prefix}, + {'camera_param_uri': camera_param_uri}, + {'sync_nn': sync_nn}, + {'nnName': nnName}, + {'resourceBaseFolder': resourceBaseFolder}, + {'monoResolution': monoResolution}, + {'fullFrameTracking': fullFrameTracking}]) + + rviz_node = launch_ros.actions.Node( + package='rviz2', executable='rviz2', output='screen', + arguments=['--display-config', default_rviz]) + + ld = LaunchDescription() + ld.add_action(declare_tf_prefix_cmd) + ld.add_action(declare_camera_model_cmd) + + ld.add_action(declare_base_frame_cmd) + ld.add_action(declare_parent_frame_cmd) + + ld.add_action(declare_pos_x_cmd) + ld.add_action(declare_pos_y_cmd) + ld.add_action(declare_pos_z_cmd) + ld.add_action(declare_roll_cmd) + ld.add_action(declare_pitch_cmd) + ld.add_action(declare_yaw_cmd) + + ld.add_action(declare_camera_param_uri_cmd) + ld.add_action(declare_sync_nn_cmd) + ld.add_action(declare_subpixel_cmd) + ld.add_action(declare_nnName_cmd) + ld.add_action(declare_resourceBaseFolder_cmd) + ld.add_action(declare_confidence_cmd) + ld.add_action(declare_lrCheckTresh_cmd) + ld.add_action(declare_monoResolution_cmd) + ld.add_action(declare_fullFrameTracking_cmd) + + ld.add_action(tracker_yolov4_spatial_node) + #ld.add_action(urdf_launch) + + return ld + diff --git a/depthai_examples/package.xml b/depthai_examples/package.xml index ba9bd4b3..d317c671 100644 --- a/depthai_examples/package.xml +++ b/depthai_examples/package.xml @@ -1,13 +1,13 @@ depthai_examples - 2.9.0 + 2.10.0 The depthai_examples package - sachin + Adam Serafin Sachin Guruswamy MIT diff --git a/depthai_examples/src/rgb_stereo_node.cpp b/depthai_examples/src/rgb_stereo_node.cpp index 6576e28a..b741812b 100644 --- a/depthai_examples/src/rgb_stereo_node.cpp +++ b/depthai_examples/src/rgb_stereo_node.cpp @@ -135,7 +135,7 @@ int main(int argc, char** argv) { bool lrcheck, extended, subpixel; bool useVideo, usePreview, useDepth; int confidence, LRchecktresh, previewWidth, previewHeight; - float dotProjectormA, floodLightmA; + float dotProjectorIntensity, floodLightIntensity; node->declare_parameter("tf_prefix", "oak"); node->declare_parameter("lrcheck", true); @@ -150,8 +150,8 @@ int main(int argc, char** argv) { node->declare_parameter("useDepth", true); node->declare_parameter("previewWidth", 300); node->declare_parameter("previewHeight", 300); - node->declare_parameter("dotProjectormA", 0.0f); - node->declare_parameter("floodLightmA", 0.0f); + node->declare_parameter("dotProjectorIntensity", 0.0f); + node->declare_parameter("floodLightIntensity", 0.0f); node->get_parameter("tf_prefix", tfPrefix); node->get_parameter("lrcheck", lrcheck); @@ -166,8 +166,8 @@ int main(int argc, 
char** argv) { node->get_parameter("useDepth", useDepth); node->get_parameter("previewWidth", previewWidth); node->get_parameter("previewHeight", previewHeight); - node->get_parameter("dotProjectormA", dotProjectormA); - node->get_parameter("floodLightmA", floodLightmA); + node->get_parameter("dotProjectorIntensity", dotProjectorIntensity); + node->get_parameter("floodLightIntensity", floodLightIntensity); int colorWidth, colorHeight; if(colorResolution == "1080p") { @@ -204,10 +204,10 @@ int main(int argc, char** argv) { std::shared_ptr param_subscriber; std::shared_ptr dot_cb_handle, flood_cb_handle; auto cb = [node, &device](const rclcpp::Parameter& p) { - if(p.get_name() == std::string("dotProjectormA")) { + if(p.get_name() == std::string("dotProjectorIntensity")) { RCLCPP_INFO(node->get_logger(), "Updating Dot Projector current to %f", p.as_double()); device.setIrLaserDotProjectorBrightness(static_cast(p.as_double())); - } else if(p.get_name() == std::string("floodLightmA")) { + } else if(p.get_name() == std::string("floodLightIntensity")) { RCLCPP_INFO(node->get_logger(), "Updating Flood Light current to %f", p.as_double()); device.setIrFloodLightBrightness(static_cast(p.as_double())); } @@ -216,27 +216,27 @@ int main(int argc, char** argv) { if(boardName.find("PRO") != std::string::npos) { param_subscriber = std::make_shared(node); - dot_cb_handle = param_subscriber->add_parameter_callback("dotProjectormA", cb); - flood_cb_handle = param_subscriber->add_parameter_callback("floodLightmA", cb); + dot_cb_handle = param_subscriber->add_parameter_callback("dotProjectorIntensity", cb); + flood_cb_handle = param_subscriber->add_parameter_callback("floodLightIntensity", cb); } #else rclcpp::TimerBase::SharedPtr timer; - auto cb = [node, &device, &dotProjectormA, &floodLightmA]() { + auto cb = [node, &device, &dotProjectorIntensity, &floodLightIntensity]() { // rclcpp::Parameter p; - float dotProjectormATemp, floodLightmATemp; - node->get_parameter("dotProjectormA", dotProjectormATemp); - node->get_parameter("floodLightmA", floodLightmATemp); - if(dotProjectormATemp != dotProjectormA) { - dotProjectormA = dotProjectormATemp; - RCLCPP_INFO(node->get_logger(), "Updating Dot Projector current to %f", dotProjectormA); - device.setIrLaserDotProjectorBrightness(static_cast(dotProjectormA)); + float dotProjectorIntensityTemp, floodLightIntensityTemp; + node->get_parameter("dotProjectorIntensity", dotProjectorIntensityTemp); + node->get_parameter("floodLightIntensity", floodLightIntensityTemp); + if(dotProjectorIntensityTemp != dotProjectorIntensity) { + dotProjectorIntensity = dotProjectorIntensityTemp; + RCLCPP_INFO(node->get_logger(), "Updating Dot Projector current to %f", dotProjectorIntensity); + device.setIrLaserDotProjectorIntensity(static_cast(dotProjectorIntensity)); } - if(floodLightmATemp != floodLightmA) { - floodLightmA = floodLightmATemp; - RCLCPP_INFO(node->get_logger(), "Updating Flood Light current to %f", floodLightmA); - device.setIrFloodLightBrightness(static_cast(floodLightmA)); + if(floodLightIntensityTemp != floodLightIntensity) { + floodLightIntensity = floodLightIntensityTemp; + RCLCPP_INFO(node->get_logger(), "Updating Flood Light current to %f", floodLightIntensity); + device.setIrFloodLightIntensity(static_cast(floodLightIntensity)); } }; if(boardName.find("PRO") != std::string::npos) { diff --git a/depthai_examples/src/stereo_inertial_publisher.cpp b/depthai_examples/src/stereo_inertial_publisher.cpp index a4fc8258..90f14613 100644 --- 
a/depthai_examples/src/stereo_inertial_publisher.cpp +++ b/depthai_examples/src/stereo_inertial_publisher.cpp @@ -295,7 +295,7 @@ int main(int argc, char** argv) { bool enableSpatialDetection, enableDotProjector, enableFloodLight; bool usb2Mode, poeMode, syncNN; double angularVelCovariance, linearAccelCovariance; - double dotProjectormA, floodLightmA; + double dotProjectorIntensity, floodLightIntensity; bool enableRosBaseTimeUpdate; std::string nnName(BLOB_NAME); // Set your blob name for the model here @@ -337,8 +337,8 @@ int main(int argc, char** argv) { node->declare_parameter("enableDotProjector", false); node->declare_parameter("enableFloodLight", false); - node->declare_parameter("dotProjectormA", 200.0); - node->declare_parameter("floodLightmA", 200.0); + node->declare_parameter("dotProjectorIntensity", 0.5); + node->declare_parameter("floodLightIntensity", 0.5); node->declare_parameter("enableRosBaseTimeUpdate", false); // updating parameters if defined in launch file. @@ -380,8 +380,8 @@ int main(int argc, char** argv) { node->get_parameter("enableDotProjector", enableDotProjector); node->get_parameter("enableFloodLight", enableFloodLight); - node->get_parameter("dotProjectormA", dotProjectormA); - node->get_parameter("floodLightmA", floodLightmA); + node->get_parameter("dotProjectorIntensity", dotProjectorIntensity); + node->get_parameter("floodLightIntensity", floodLightIntensity); node->get_parameter("enableRosBaseTimeUpdate", enableRosBaseTimeUpdate); if(resourceBaseFolder.empty()) { @@ -485,11 +485,11 @@ int main(int argc, char** argv) { std::vector<std::tuple<std::string, int, int>> irDrivers = device->getIrDrivers(); if(!irDrivers.empty()) { if(enableDotProjector) { - device->setIrLaserDotProjectorBrightness(dotProjectormA); + device->setIrLaserDotProjectorIntensity(dotProjectorIntensity); } if(enableFloodLight) { - device->setIrFloodLightBrightness(floodLightmA); + device->setIrFloodLightIntensity(floodLightIntensity); } } diff --git a/depthai_examples/src/tracker_yolov4_publisher.cpp b/depthai_examples/src/tracker_yolov4_publisher.cpp new file mode 100644 index 00000000..ae61961f --- /dev/null +++ b/depthai_examples/src/tracker_yolov4_publisher.cpp @@ -0,0 +1,165 @@ +#include +#include + +#include "camera_info_manager/camera_info_manager.hpp" +#include "depthai_bridge/BridgePublisher.hpp" +#include "depthai_bridge/ImageConverter.hpp" +#include "depthai_bridge/TrackDetectionConverter.hpp" +#include "depthai_ros_msgs/msg/track_detection2_d_array.hpp" +#include "rclcpp/executors.hpp" +#include "rclcpp/node.hpp" +#include "rclcpp/rclcpp.hpp" +#include "sensor_msgs/msg/image.hpp" +#include "vision_msgs/msg/detection2_d_array.hpp" + +// Common includes needed for development using the depthai library +#include "depthai/device/DataQueue.hpp" +#include "depthai/device/Device.hpp" +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/node/ColorCamera.hpp" +#include "depthai/pipeline/node/MonoCamera.hpp" +#include "depthai/pipeline/node/ObjectTracker.hpp" +#include "depthai/pipeline/node/SpatialDetectionNetwork.hpp" +#include "depthai/pipeline/node/StereoDepth.hpp" +#include "depthai/pipeline/node/XLinkOut.hpp" + +const std::vector<std::string> label_map = { + "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", + "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", + "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", + "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", + "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", + "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", + "donut", "cake", "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", + "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", + "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"}; + +dai::Pipeline createPipeline(std::string nnPath, bool fullFrameTracking) { + dai::Pipeline pipeline; + auto colorCam = pipeline.create<dai::node::ColorCamera>(); + auto detectionNetwork = pipeline.create<dai::node::YoloDetectionNetwork>(); + auto tracker = pipeline.create<dai::node::ObjectTracker>(); + + // create xlink connections + auto xoutRgb = pipeline.create<dai::node::XLinkOut>(); + auto xoutTracker = pipeline.create<dai::node::XLinkOut>(); + + xoutRgb->setStreamName("preview"); + xoutTracker->setStreamName("tracklets"); + + colorCam->setPreviewSize(416, 416); + colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + colorCam->setInterleaved(false); + colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + + // setting node configs + detectionNetwork->setBlobPath(nnPath); + detectionNetwork->setConfidenceThreshold(0.3f); + detectionNetwork->input.setBlocking(false); + + // yolo specific parameters + detectionNetwork->setNumClasses(80); + detectionNetwork->setCoordinateSize(4); + detectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}); + detectionNetwork->setAnchorMasks({{"side13", {3, 4, 5}}, {"side26", {1, 2, 3}}}); + detectionNetwork->setIouThreshold(0.5f); + + // object tracker settings + tracker->setTrackerType(dai::TrackerType::ZERO_TERM_COLOR_HISTOGRAM); + tracker->setTrackerIdAssignmentPolicy(dai::TrackerIdAssignmentPolicy::UNIQUE_ID); + + // Link plugins CAM -> NN -> XLINK + colorCam->preview.link(detectionNetwork->input); + tracker->passthroughTrackerFrame.link(xoutRgb->input); + + if(fullFrameTracking) { + colorCam->setPreviewKeepAspectRatio(false); + colorCam->video.link(tracker->inputTrackerFrame); + // tracker->inputTrackerFrame.setBlocking(false); + // do not block the pipeline if it's too slow on full frame + // tracker->inputTrackerFrame.setQueueSize(2); + } else { + detectionNetwork->passthrough.link(tracker->inputTrackerFrame); + } + + detectionNetwork->passthrough.link(tracker->inputDetectionFrame); + detectionNetwork->out.link(tracker->inputDetections); + tracker->out.link(xoutTracker->input); + + return pipeline; +} + +int main(int argc, char** argv) { + rclcpp::init(argc, argv); + auto node = rclcpp::Node::make_shared("tracker_yolov4_node"); + + std::string tfPrefix, resourceBaseFolder, nnPath; + std::string camera_param_uri; + bool fullFrameTracking; + std::string nnName(BLOB_NAME); // Set your blob name for the model here + + node->declare_parameter("tf_prefix", "oak"); + node->declare_parameter("camera_param_uri", camera_param_uri); + node->declare_parameter("nnName", ""); + node->declare_parameter("resourceBaseFolder", ""); + node->declare_parameter("fullFrameTracking", false); + + node->get_parameter("tf_prefix", tfPrefix); + node->get_parameter("camera_param_uri", camera_param_uri); + node->get_parameter("resourceBaseFolder", resourceBaseFolder); + node->get_parameter("fullFrameTracking", fullFrameTracking); + + if(resourceBaseFolder.empty()) { + throw std::runtime_error("Provide the path to the resource folder containing the NN blob in \'resourceBaseFolder\' "); + } + + std::string nnParam; + node->get_parameter("nnName", nnParam); + if(nnParam != "x") { + node->get_parameter("nnName", nnName); + } + + nnPath = resourceBaseFolder + "/" + nnName; + dai::Pipeline pipeline = createPipeline(nnPath, fullFrameTracking); + dai::Device device(pipeline); + + auto colorQueue = device.getOutputQueue("preview", 30, false); + auto trackQueue = device.getOutputQueue("tracklets", 30, false); + auto calibrationHandler = device.readCalibration(); + + dai::rosBridge::ImageConverter rgbConverter(tfPrefix + "_rgb_camera_optical_frame", false); + auto rgbCameraInfo = rgbConverter.calibrationToCameraInfo(calibrationHandler, dai::CameraBoardSocket::CAM_A, -1, -1); + dai::rosBridge::BridgePublisher<sensor_msgs::msg::Image, dai::ImgFrame> rgbPublish(colorQueue, + node, + std::string("color/image"), + std::bind(&dai::rosBridge::ImageConverter::toRosMsg, + &rgbConverter, // since the converter has the same frame name + // and image type is also same we can reuse it + std::placeholders::_1, + std::placeholders::_2), + 30, + rgbCameraInfo, + "color"); + + dai::rosBridge::TrackDetectionConverter trackConverter(tfPrefix + "_rgb_camera_optical_frame", 416, 416, fullFrameTracking, 0.3); + dai::rosBridge::BridgePublisher<depthai_ros_msgs::msg::TrackDetection2DArray, dai::Tracklets> trackPublish( + trackQueue, + node, + std::string("color/yolov4_tracklets"), + std::bind(&dai::rosBridge::TrackDetectionConverter::toRosMsg, &trackConverter, std::placeholders::_1, std::placeholders::_2), + 30); + + rgbPublish.addPublisherCallback(); + trackPublish.addPublisherCallback(); + + rclcpp::spin(node); + + return 0; +} diff --git a/depthai_examples/src/tracker_yolov4_spatial_publisher.cpp b/depthai_examples/src/tracker_yolov4_spatial_publisher.cpp new file mode 100644 index 00000000..1d5934cf --- /dev/null +++ b/depthai_examples/src/tracker_yolov4_spatial_publisher.cpp @@ -0,0 +1,246 @@ +#include +#include + +#include "camera_info_manager/camera_info_manager.hpp" +#include "depthai_bridge/BridgePublisher.hpp" +#include "depthai_bridge/ImageConverter.hpp" +#include "depthai_bridge/TrackSpatialDetectionConverter.hpp" +#include "depthai_ros_msgs/msg/track_detection2_d_array.hpp" +#include "rclcpp/executors.hpp" +#include "rclcpp/node.hpp" +#include "rclcpp/rclcpp.hpp" +#include "sensor_msgs/msg/image.hpp" +#include "vision_msgs/msg/detection2_d_array.hpp" + +// Common includes needed for development using the depthai library +#include "depthai/device/DataQueue.hpp" +#include "depthai/device/Device.hpp" +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/node/ColorCamera.hpp" +#include "depthai/pipeline/node/MonoCamera.hpp" +#include "depthai/pipeline/node/ObjectTracker.hpp" +#include "depthai/pipeline/node/SpatialDetectionNetwork.hpp" +#include "depthai/pipeline/node/StereoDepth.hpp" +#include "depthai/pipeline/node/XLinkOut.hpp" + +const std::vector<std::string> label_map = { + "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", + "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", + "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", + "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", + "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", + "bowl", "banana", "apple", "sandwich", "orange", "broccoli",
"carrot", "hot dog", "pizza", + "donut", "cake", "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", + "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", + "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"}; + +dai::Pipeline createPipeline(bool syncNN, bool subpixel, std::string nnPath, int confidence, int LRchecktresh, std::string resolution, bool fullFrameTracking) { + dai::Pipeline pipeline; + dai::node::MonoCamera::Properties::SensorResolution monoResolution; + auto colorCam = pipeline.create(); + auto spatialDetectionNetwork = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto stereo = pipeline.create(); + auto tracker = pipeline.create(); + + // create xlink connections + auto xoutRgb = pipeline.create(); + auto xoutDepth = pipeline.create(); + auto xoutTracker = pipeline.create(); + + xoutRgb->setStreamName("preview"); + xoutDepth->setStreamName("depth"); + xoutTracker->setStreamName("tracklets"); + + colorCam->setPreviewSize(416, 416); + colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + colorCam->setInterleaved(false); + colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + + if(resolution == "720p") { + monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_720_P; + } else if(resolution == "400p") { + monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_400_P; + } else if(resolution == "800p") { + monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_800_P; + } else if(resolution == "480p") { + monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_480_P; + } else { + RCLCPP_ERROR(rclcpp::get_logger("rclcpp"), "Invalid parameter. 
-> monoResolution: %s", resolution.c_str()); + throw std::runtime_error("Invalid mono camera resolution."); + } + + monoLeft->setResolution(monoResolution); + monoLeft->setBoardSocket(dai::CameraBoardSocket::CAM_B); + monoRight->setResolution(monoResolution); + monoRight->setBoardSocket(dai::CameraBoardSocket::CAM_C); + + /// setting node configs + stereo->initialConfig.setConfidenceThreshold(confidence); + stereo->setRectifyEdgeFillColor(0); // black, to better see the cutout + stereo->initialConfig.setLeftRightCheckThreshold(LRchecktresh); + stereo->setSubpixel(subpixel); + stereo->setDepthAlign(dai::CameraBoardSocket::CAM_A); + + spatialDetectionNetwork->setBlobPath(nnPath); + spatialDetectionNetwork->setConfidenceThreshold(0.3f); + spatialDetectionNetwork->input.setBlocking(false); + spatialDetectionNetwork->setBoundingBoxScaleFactor(0.5); + spatialDetectionNetwork->setDepthLowerThreshold(100); + spatialDetectionNetwork->setDepthUpperThreshold(5000); + + // yolo specific parameters + spatialDetectionNetwork->setNumClasses(80); + spatialDetectionNetwork->setCoordinateSize(4); + spatialDetectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}); + spatialDetectionNetwork->setAnchorMasks({{"side13", {3, 4, 5}}, {"side26", {1, 2, 3}}}); + spatialDetectionNetwork->setIouThreshold(0.5f); + + // object tracker settings + tracker->setTrackerType(dai::TrackerType::ZERO_TERM_COLOR_HISTOGRAM); + tracker->setTrackerIdAssignmentPolicy(dai::TrackerIdAssignmentPolicy::UNIQUE_ID); + + // Link plugins CAM -> STEREO -> XLINK + monoLeft->out.link(stereo->left); + monoRight->out.link(stereo->right); + + // Link plugins CAM -> NN -> XLINK + colorCam->preview.link(spatialDetectionNetwork->input); + tracker->passthroughTrackerFrame.link(xoutRgb->input); + + if(fullFrameTracking) { + colorCam->setPreviewKeepAspectRatio(false); + colorCam->video.link(tracker->inputTrackerFrame); + // tracker->inputTrackerFrame.setBlocking(false); + // do not block the pipeline if it's too slow on full frame + // tracker->inputTrackerFrame.setQueueSize(2); + } else { + spatialDetectionNetwork->passthrough.link(tracker->inputTrackerFrame); + } + + spatialDetectionNetwork->passthrough.link(tracker->inputDetectionFrame); + spatialDetectionNetwork->out.link(tracker->inputDetections); + tracker->out.link(xoutTracker->input); + stereo->depth.link(spatialDetectionNetwork->inputDepth); + spatialDetectionNetwork->passthroughDepth.link(xoutDepth->input); + + return pipeline; +} + +int main(int argc, char** argv) { + rclcpp::init(argc, argv); + auto node = rclcpp::Node::make_shared("tracker_yolov4_spatial_node"); + + std::string tfPrefix, resourceBaseFolder, nnPath; + std::string camera_param_uri; + std::string nnName(BLOB_NAME); // Set your blob name for the model here + bool syncNN, subpixel, fullFrameTracking; + int confidence = 200, LRchecktresh = 5; + std::string monoResolution = "400p"; + + node->declare_parameter("tf_prefix", "oak"); + node->declare_parameter("camera_param_uri", camera_param_uri); + node->declare_parameter("sync_nn", true); + node->declare_parameter("subpixel", true); + node->declare_parameter("nnName", ""); + node->declare_parameter("confidence", confidence); + node->declare_parameter("LRchecktresh", LRchecktresh); + node->declare_parameter("monoResolution", monoResolution); + node->declare_parameter("resourceBaseFolder", ""); + node->declare_parameter("fullFrameTracking", false); + + node->get_parameter("tf_prefix", tfPrefix); + node->get_parameter("camera_param_uri", camera_param_uri); + 
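+ // The readbacks below mirror the parameter declarations above. "nnName" is handled
+ // separately: the launch files pass the sentinel value "x" to mean "keep the
+ // compiled-in default blob (BLOB_NAME)"; any other value overrides it (see the
+ // nnParam check below).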
node->get_parameter("sync_nn", syncNN); + node->get_parameter("subpixel", subpixel); + node->get_parameter("confidence", confidence); + node->get_parameter("LRchecktresh", LRchecktresh); + node->get_parameter("monoResolution", monoResolution); + node->get_parameter("resourceBaseFolder", resourceBaseFolder); + node->get_parameter("fullFrameTracking", fullFrameTracking); + + if(resourceBaseFolder.empty()) { + throw std::runtime_error("Send the path to the resouce folder containing NNBlob in \'resourceBaseFolder\' "); + } + + std::string nnParam; + node->get_parameter("nnName", nnParam); + if(nnParam != "x") { + node->get_parameter("nnName", nnName); + } + + nnPath = resourceBaseFolder + "/" + nnName; + dai::Pipeline pipeline = createPipeline(syncNN, subpixel, nnPath, confidence, LRchecktresh, monoResolution, fullFrameTracking); + dai::Device device(pipeline); + + auto colorQueue = device.getOutputQueue("preview", 30, false); + auto depthQueue = device.getOutputQueue("depth", 30, false); + auto trackQueue = device.getOutputQueue("tracklets", 30, false); + auto calibrationHandler = device.readCalibration(); + + int width, height; + if(monoResolution == "720p") { + width = 1280; + height = 720; + } else if(monoResolution == "400p") { + width = 640; + height = 400; + } else if(monoResolution == "800p") { + width = 1280; + height = 800; + } else if(monoResolution == "480p") { + width = 640; + height = 480; + } else { + RCLCPP_ERROR(rclcpp::get_logger("rclcpp"), "Invalid parameter. -> monoResolution: %s", monoResolution.c_str()); + throw std::runtime_error("Invalid mono camera resolution."); + } + + auto boardName = calibrationHandler.getEepromData().boardName; + if(height > 480 && boardName == "OAK-D-LITE") { + width = 640; + height = 480; + } + + dai::rosBridge::ImageConverter rgbConverter(tfPrefix + "_rgb_camera_optical_frame", false); + auto rgbCameraInfo = rgbConverter.calibrationToCameraInfo(calibrationHandler, dai::CameraBoardSocket::CAM_A, -1, -1); + dai::rosBridge::BridgePublisher rgbPublish(colorQueue, + node, + std::string("color/image"), + std::bind(&dai::rosBridge::ImageConverter::toRosMsg, + &rgbConverter, // since the converter has the same frame name + // and image type is also same we can reuse it + std::placeholders::_1, + std::placeholders::_2), + 30, + rgbCameraInfo, + "color"); + + dai::rosBridge::TrackSpatialDetectionConverter trackConverter(tfPrefix + "_rgb_camera_optical_frame", 416, 416, fullFrameTracking, 0.3); + dai::rosBridge::BridgePublisher trackPublish( + trackQueue, + node, + std::string("color/yolov4_Spatial_tracklets"), + std::bind(&dai::rosBridge::TrackSpatialDetectionConverter::toRosMsg, &trackConverter, std::placeholders::_1, std::placeholders::_2), + 30); + + dai::rosBridge::ImageConverter depthConverter(tfPrefix + "_right_camera_optical_frame", true); + auto rightCameraInfo = depthConverter.calibrationToCameraInfo(calibrationHandler, dai::CameraBoardSocket::CAM_C, width, height); + dai::rosBridge::BridgePublisher depthPublish( + depthQueue, + node, + std::string("stereo/depth"), + std::bind(&dai::rosBridge::ImageConverter::toRosMsg, &depthConverter, std::placeholders::_1, std::placeholders::_2), + 30, + rightCameraInfo, + "stereo"); + + depthPublish.addPublisherCallback(); + trackPublish.addPublisherCallback(); + rgbPublish.addPublisherCallback(); + + rclcpp::spin(node); + + return 0; +} diff --git a/depthai_filters/CMakeLists.txt b/depthai_filters/CMakeLists.txt index e6d4fbc0..a792b2a4 100644 --- a/depthai_filters/CMakeLists.txt +++ 
b/depthai_filters/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.8) -project(depthai_filters VERSION 2.9.0 LANGUAGES CXX C) +project(depthai_filters VERSION 2.10.0 LANGUAGES CXX C) if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wall -Wextra -Wpedantic) diff --git a/depthai_filters/config/wls.yaml b/depthai_filters/config/wls.yaml index f5faef15..1c1b1c91 100644 --- a/depthai_filters/config/wls.yaml +++ b/depthai_filters/config/wls.yaml @@ -6,4 +6,5 @@ i_publish_topic: true stereo: i_output_disparity: true - i_subpixel: true \ No newline at end of file + i_subpixel: true + i_board_socket_id: 1 diff --git a/depthai_filters/include/depthai_filters/feature_tracker_overlay.hpp b/depthai_filters/include/depthai_filters/feature_tracker_overlay.hpp index 92119f6d..6ce15fb3 100644 --- a/depthai_filters/include/depthai_filters/feature_tracker_overlay.hpp +++ b/depthai_filters/include/depthai_filters/feature_tracker_overlay.hpp @@ -1,6 +1,6 @@ #pragma once -#include "cv_bridge/cv_bridge.hpp" +#include "cv_bridge/cv_bridge.h" #include "depthai_ros_msgs/msg/tracked_features.hpp" #include "geometry_msgs/msg/point.hpp" #include "message_filters/subscriber.h" @@ -42,5 +42,4 @@ class FeatureTrackerOverlay : public rclcpp::Node { std::unordered_set trackedIDs; std::unordered_map> trackedFeaturesPath; }; - -} // namespace depthai_filters \ No newline at end of file +} // namespace depthai_filters diff --git a/depthai_filters/include/depthai_filters/wls_filter.hpp b/depthai_filters/include/depthai_filters/wls_filter.hpp index 4874b215..126560d9 100644 --- a/depthai_filters/include/depthai_filters/wls_filter.hpp +++ b/depthai_filters/include/depthai_filters/wls_filter.hpp @@ -20,6 +20,8 @@ class WLSFilter : public rclcpp::Node { const sensor_msgs::msg::CameraInfo::ConstSharedPtr& disp_info, const sensor_msgs::msg::Image::ConstSharedPtr& leftImg); + OnSetParametersCallbackHandle::SharedPtr paramCBHandle; + rcl_interfaces::msg::SetParametersResult parameterCB(const std::vector& params); message_filters::Subscriber disparityImgSub; message_filters::Subscriber leftImgSub; @@ -28,5 +30,6 @@ class WLSFilter : public rclcpp::Node { std::unique_ptr> sync; cv::Ptr filter; image_transport::CameraPublisher depthPub; + double maxDisparity; }; } // namespace depthai_filters diff --git a/depthai_filters/package.xml b/depthai_filters/package.xml index 3fab5656..057bf37e 100644 --- a/depthai_filters/package.xml +++ b/depthai_filters/package.xml @@ -2,7 +2,7 @@ depthai_filters - 2.9.0 + 2.10.0 Depthai filters package Adam Serafin MIT diff --git a/depthai_filters/src/feature_tracker_overlay.cpp b/depthai_filters/src/feature_tracker_overlay.cpp index 88e91314..92668392 100644 --- a/depthai_filters/src/feature_tracker_overlay.cpp +++ b/depthai_filters/src/feature_tracker_overlay.cpp @@ -1,6 +1,6 @@ #include "depthai_filters/feature_tracker_overlay.hpp" -#include "cv_bridge/cv_bridge.hpp" +#include "cv_bridge/cv_bridge.h" #include "depthai_filters/utils.hpp" namespace depthai_filters { @@ -73,4 +73,3 @@ void FeatureTrackerOverlay::drawFeatures(cv::Mat& img) { } } // namespace depthai_filters #include "rclcpp_components/register_node_macro.hpp" -RCLCPP_COMPONENTS_REGISTER_NODE(depthai_filters::FeatureTrackerOverlay); \ No newline at end of file diff --git a/depthai_filters/src/features_3d.cpp b/depthai_filters/src/features_3d.cpp index 708f3258..825c70a7 100644 --- a/depthai_filters/src/features_3d.cpp +++ b/depthai_filters/src/features_3d.cpp @@ -1,6 +1,6 @@ #include 
"depthai_filters/features_3d.hpp" -#include "cv_bridge/cv_bridge.hpp" +#include "cv_bridge/cv_bridge.h" #include "depthai_filters/utils.hpp" #include "geometry_msgs/msg/point32.hpp" #include "opencv2/opencv.hpp" @@ -66,4 +66,3 @@ void Features3D::overlayCB(const sensor_msgs::msg::Image::ConstSharedPtr& depth, } // namespace depthai_filters #include "rclcpp_components/register_node_macro.hpp" -RCLCPP_COMPONENTS_REGISTER_NODE(depthai_filters::Features3D); \ No newline at end of file diff --git a/depthai_filters/src/spatial_bb.cpp b/depthai_filters/src/spatial_bb.cpp index 5ebe2363..cce013d7 100644 --- a/depthai_filters/src/spatial_bb.cpp +++ b/depthai_filters/src/spatial_bb.cpp @@ -1,6 +1,6 @@ #include "depthai_filters/spatial_bb.hpp" -#include "cv_bridge/cv_bridge.hpp" +#include "cv_bridge/cv_bridge.h" #include "depthai_filters/utils.hpp" #include "geometry_msgs/msg/point32.hpp" #include "opencv2/opencv.hpp" @@ -145,4 +145,3 @@ void SpatialBB::overlayCB(const sensor_msgs::msg::Image::ConstSharedPtr& preview } // namespace depthai_filters #include "rclcpp_components/register_node_macro.hpp" -RCLCPP_COMPONENTS_REGISTER_NODE(depthai_filters::SpatialBB); \ No newline at end of file diff --git a/depthai_filters/src/wls_filter.cpp b/depthai_filters/src/wls_filter.cpp index a01193bf..1541eb00 100644 --- a/depthai_filters/src/wls_filter.cpp +++ b/depthai_filters/src/wls_filter.cpp @@ -17,46 +17,69 @@ void WLSFilter::onInit() { sync = std::make_unique>(syncPolicy(10), disparityImgSub, disparityInfoSub, leftImgSub); sync->registerCallback(std::bind(&WLSFilter::wlsCB, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3)); filter = cv::ximgproc::createDisparityWLSFilterGeneric(false); - filter->setLambda(8000); - filter->setSigmaColor(1.5); + filter->setLambda(this->declare_parameter("lambda", 8000.0)); + filter->setSigmaColor(this->declare_parameter("sigma_color", 1.5)); + maxDisparity = this->declare_parameter("max_disparity", 760.0); + paramCBHandle = this->add_on_set_parameters_callback(std::bind(&WLSFilter::parameterCB, this, std::placeholders::_1)); depthPub = image_transport::create_camera_publisher(this, "wls_filtered"); } +rcl_interfaces::msg::SetParametersResult WLSFilter::parameterCB(const std::vector& params) { + for(const auto& p : params) { + if(p.get_name() == "lambda") { + filter->setLambda(p.as_double()); + } else if(p.get_name() == "sigma_color") { + filter->setSigmaColor(p.as_double()); + } else if(p.get_name() == "max_disparity") { + maxDisparity = p.as_double(); + } + } + rcl_interfaces::msg::SetParametersResult res; + res.successful = true; + return res; +} + void WLSFilter::wlsCB(const sensor_msgs::msg::Image::ConstSharedPtr& disp, const sensor_msgs::msg::CameraInfo::ConstSharedPtr& disp_info, const sensor_msgs::msg::Image::ConstSharedPtr& leftImg) { cv::Mat leftFrame = utils::msgToMat(this->get_logger(), leftImg, sensor_msgs::image_encodings::MONO8); cv::Mat dispFrame; - if(disp->encoding == sensor_msgs::image_encodings::TYPE_16UC1) { - dispFrame = utils::msgToMat(this->get_logger(), disp, sensor_msgs::image_encodings::TYPE_16UC1); - } else { - dispFrame = utils::msgToMat(this->get_logger(), disp, sensor_msgs::image_encodings::MONO8); - } + dispFrame = utils::msgToMat(this->get_logger(), disp, disp->encoding); cv::Mat dispFiltered; sensor_msgs::msg::CameraInfo depthInfo = *disp_info; filter->filter(dispFrame, leftFrame, dispFiltered); sensor_msgs::msg::Image depth; - auto factor = (disp_info->k[0] * disp_info->p[3]); - cv::Mat depthOut = 
cv::Mat(cv::Size(dispFiltered.cols, dispFiltered.rows), CV_16UC1); - depthOut.forEach([&dispFiltered, &factor, &disp](short& pixel, const int* position) -> void { - if(disp->encoding == sensor_msgs::image_encodings::TYPE_16UC1) { - auto dispPoint = dispFiltered.at(position); - if(dispPoint == 0) + auto factor = abs(depthInfo.p[3]) * 100.0; + // set distortion to 0 + if(disp->encoding == sensor_msgs::image_encodings::MONO8) { + auto dispMultiplier = 255.0 / maxDisparity; + cv::Mat depthOut = cv::Mat(dispFiltered.size(), CV_8UC1); + depthOut.forEach([&dispFiltered, &factor, &dispMultiplier](uint8_t& pixel, const int* position) -> void { + auto disp = dispFiltered.at(position); + if(disp == 0) { pixel = 0; - else - pixel = factor / dispPoint; - } else { - auto dispPoint = dispFiltered.at(position); - if(dispPoint == 0) + } else { + pixel = factor / disp * dispMultiplier; + } + }); + cv_bridge::CvImage(disp->header, sensor_msgs::image_encodings::MONO8, depthOut).toImageMsg(depth); + depthPub.publish(depth, depthInfo); + return; + } else { + cv::Mat depthOut = cv::Mat(dispFiltered.size(), CV_16UC1); + auto dispMultiplier = 255.0 * 255.0 / maxDisparity; + depthOut.forEach([&dispFiltered, &factor, &dispMultiplier](uint16_t& pixel, const int* position) -> void { + auto disp = dispFiltered.at(position); + if(disp == 0) { pixel = 0; - else - pixel = factor / dispPoint; - } - }); - - cv_bridge::CvImage(disp->header, sensor_msgs::image_encodings::TYPE_16UC1, depthOut).toImageMsg(depth); - depthPub.publish(depth, depthInfo); + } else { + pixel = factor / disp * dispMultiplier; + } + }); + cv_bridge::CvImage(disp->header, sensor_msgs::image_encodings::TYPE_16UC1, depthOut).toImageMsg(depth); + depthPub.publish(depth, depthInfo); + } } } // namespace depthai_filters diff --git a/depthai_ros_driver/CMakeLists.txt b/depthai_ros_driver/CMakeLists.txt index e6ba0821..a33a8ef6 100644 --- a/depthai_ros_driver/CMakeLists.txt +++ b/depthai_ros_driver/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.22) -project(depthai_ros_driver VERSION 2.9.0) +project(depthai_ros_driver VERSION 2.10.0) set(CMAKE_POSITION_INDEPENDENT_CODE ON) set(CMAKE_BUILD_SHARED_LIBS ON) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) @@ -30,6 +30,7 @@ rclcpp sensor_msgs diagnostic_updater diagnostic_msgs +ffmpeg_image_transport_msgs ) set(SENSOR_DEPS @@ -66,6 +67,7 @@ add_library( src/utils.cpp src/dai_nodes/base_node.cpp src/dai_nodes/sys_logger.cpp + src/dai_nodes/sensors/img_pub.cpp src/dai_nodes/sensors/sensor_helpers.cpp # TODO: Figure out different place for this src/param_handlers/camera_param_handler.cpp src/param_handlers/imu_param_handler.cpp @@ -73,6 +75,9 @@ add_library( src/param_handlers/sensor_param_handler.cpp src/param_handlers/feature_tracker_param_handler.cpp src/param_handlers/stereo_param_handler.cpp + src/param_handlers/tof_param_handler.cpp + src/param_handlers/pipeline_gen_param_handler.cpp + src/param_handlers/sync_param_handler.cpp ) ament_target_dependencies(${COMMON_LIB_NAME} ${COMMON_DEPS}) @@ -85,7 +90,9 @@ add_library( src/dai_nodes/sensors/mono.cpp src/dai_nodes/sensors/feature_tracker.cpp src/dai_nodes/sensors/sensor_wrapper.cpp - src/dai_nodes/stereo.cpp + src/dai_nodes/sensors/stereo.cpp + src/dai_nodes/sensors/tof.cpp + src/dai_nodes/sensors/sync.cpp ) ament_target_dependencies(${SENSOR_LIB_NAME} ${SENSOR_DEPS}) @@ -120,7 +127,11 @@ target_link_libraries( ${NN_LIB_NAME} ${COMMON_LIB_NAME} ) - +add_executable( + camera_node + src/camera_node.cpp) +ament_target_dependencies(camera_node ${CAM_DEPS} 
${COMMON_DEPS} ${SENSOR_DEPS} ${NN_DEPS}) +target_link_libraries(camera_node ${PROJECT_NAME} ${SENSOR_LIB_NAME} ${NN_LIB_NAME} ${COMMON_LIB_NAME}) rclcpp_components_register_nodes(${PROJECT_NAME} "${PROJECT_NAME}::Camera") pluginlib_export_plugin_description_file(${PROJECT_NAME} plugins.xml) ament_export_include_directories(include) @@ -143,7 +154,7 @@ EXPORT ${PROJECT_NAME}Targets install(EXPORT ${PROJECT_NAME}Targets DESTINATION share/${PROJECT_NAME}/cmake) - +install(TARGETS camera_node DESTINATION lib/${PROJECT_NAME}) ament_python_install_package(${PROJECT_NAME}) # Install Python executables install( diff --git a/depthai_ros_driver/config/camera.yaml b/depthai_ros_driver/config/camera.yaml index b8a25dab..51afc242 100644 --- a/depthai_ros_driver/config/camera.yaml +++ b/depthai_ros_driver/config/camera.yaml @@ -1,4 +1,4 @@ -/oak: +/**: ros__parameters: camera: i_enable_imu: true diff --git a/depthai_ros_driver/config/sr_poe_rgbd.yaml b/depthai_ros_driver/config/sr_poe_rgbd.yaml new file mode 100644 index 00000000..350dd6d7 --- /dev/null +++ b/depthai_ros_driver/config/sr_poe_rgbd.yaml @@ -0,0 +1,10 @@ + +/oak: + ros__parameters: + camera: + i_enable_imu: true + i_enable_ir: false + i_nn_type: none + i_pipeline_type: rgbtof + right: + i_low_bandwidth: true diff --git a/depthai_ros_driver/include/depthai_ros_driver/camera.hpp b/depthai_ros_driver/include/depthai_ros_driver/camera.hpp index 19bc51ed..3d0ed6fa 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/camera.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/camera.hpp @@ -77,6 +77,10 @@ class Camera : public rclcpp::Node { * Runs onConfigure(); */ void start(); + /* + * Since we cannot use shared_from this before the object is initialized, we need to use a timer to start the device. 
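+ * indirectStart() is scheduled on startTimer (declared below) so that it runs after
+ * construction has finished, when shared_from_this() is safe to call.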
+ */ + void indirectStart(); void restart(); void diagCB(const diagnostic_msgs::msg::DiagnosticArray::SharedPtr msg); @@ -89,6 +93,8 @@ class Camera : public rclcpp::Node { std::shared_ptr device; std::vector> daiNodes; bool camRunning = false; + bool initialized = false; std::unique_ptr tfPub; + rclcpp::TimerBase::SharedPtr startTimer; }; -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/base_node.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/base_node.hpp index de6ca37e..d6df3e3d 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/base_node.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/base_node.hpp @@ -3,11 +3,18 @@ #include #include +#include "depthai-shared/common/CameraBoardSocket.hpp" #include "depthai/pipeline/Node.hpp" +#include "depthai_ros_driver/utils.hpp" +#include "rclcpp/logger.hpp" namespace dai { class Pipeline; class Device; +namespace node { +class XLinkOut; +class VideoEncoder; +} // namespace node } // namespace dai namespace rclcpp { @@ -17,6 +24,10 @@ class Parameter; namespace depthai_ros_driver { namespace dai_nodes { +namespace sensor_helpers { +class ImagePublisher; +struct VideoEncoderConfig; +} // namespace sensor_helpers class BaseNode { public: /** @@ -26,11 +37,13 @@ class BaseNode { * @param node The node * @param pipeline The pipeline */ - BaseNode(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr pipeline); + BaseNode(const std::string& daiNodeName, std::shared_ptr node, std::shared_ptr pipeline); virtual ~BaseNode(); virtual void updateParams(const std::vector& params); virtual void link(dai::Node::Input in, int linkType = 0); virtual dai::Node::Input getInput(int linkType = 0); + virtual dai::Node::Input getInputByName(const std::string& name = ""); + virtual std::vector> getPublishers(); virtual void setupQueues(std::shared_ptr device) = 0; /** * @brief Sets the names of the queues. @@ -42,16 +55,22 @@ class BaseNode { * @param pipeline The pipeline */ virtual void setXinXout(std::shared_ptr pipeline) = 0; + std::shared_ptr setupOutput(std::shared_ptr pipeline, + const std::string& qName, + std::function nodeLink, + bool isSynced = false, + const utils::VideoEncoderConfig& encoderConfig = {}); virtual void closeQueues() = 0; + std::shared_ptr setupXout(std::shared_ptr pipeline, const std::string& name); void setNodeName(const std::string& daiNodeName); - void setROSNodePointer(rclcpp::Node* node); + void setROSNodePointer(std::shared_ptr node); /** * @brief Gets the ROS node pointer. * * @return The ROS node pointer. */ - rclcpp::Node* getROSNode(); + std::shared_ptr getROSNode(); /** * @brief Gets the name of the node. * @@ -64,12 +83,22 @@ class BaseNode { * @param[in] frameName The frame name */ std::string getTFPrefix(const std::string& frameName = ""); + /** + * @brief Append ROS node name to the frameName given and append optical frame suffix to it. 
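+ * For example (illustrative): with node name "oak" and frameName "rgb", this would
+ * yield a frame like "oak_rgb_camera_optical_frame".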
+ * + * @param[in] frameName The frame name + */ + std::string getOpticalTFPrefix(const std::string& frameName = ""); bool ipcEnabled(); + std::string getSocketName(dai::CameraBoardSocket socket); + bool rsCompabilityMode(); + rclcpp::Logger getLogger(); private: - rclcpp::Node* baseNode; + std::shared_ptr baseNode; std::string baseDAINodeName; bool intraProcessEnabled; + rclcpp::Logger logger; }; } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/detection.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/detection.hpp index 812a2908..3986f6c4 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/detection.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/detection.hpp @@ -4,7 +4,6 @@ #include #include -#include "camera_info_manager/camera_info_manager.hpp" #include "depthai-shared/common/CameraBoardSocket.hpp" #include "depthai/device/DataQueue.hpp" #include "depthai/device/Device.hpp" @@ -15,11 +14,9 @@ #include "depthai_bridge/ImageConverter.hpp" #include "depthai_bridge/ImgDetectionConverter.hpp" #include "depthai_ros_driver/dai_nodes/base_node.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/img_pub.hpp" #include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" #include "depthai_ros_driver/param_handlers/nn_param_handler.hpp" -#include "depthai_ros_driver/utils.hpp" -#include "image_transport/camera_publisher.hpp" -#include "image_transport/image_transport.hpp" #include "rclcpp/node.hpp" namespace depthai_ros_driver { @@ -38,17 +35,17 @@ class Detection : public BaseNode { * @param pipeline The pipeline */ Detection(const std::string& daiNodeName, - rclcpp::Node* node, + std::shared_ptr node, std::shared_ptr pipeline, const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A) : BaseNode(daiNodeName, node, pipeline) { - RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Creating node %s", daiNodeName.c_str()); setNames(); detectionNode = pipeline->create(); imageManip = pipeline->create(); ph = std::make_unique(node, daiNodeName, socket); ph->declareParams(detectionNode, imageManip); - RCLCPP_DEBUG(node->get_logger(), "Node %s created", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Node %s created", daiNodeName.c_str()); imageManip->out.link(detectionNode->input); setXinXout(pipeline); } @@ -61,8 +58,8 @@ class Detection : public BaseNode { */ void setupQueues(std::shared_ptr device) override { nnQ = device->getOutputQueue(nnQName, ph->getParam("i_max_q_size"), false); - std::string socketName = utils::getSocketName(static_cast(ph->getParam("i_board_socket_id"))); - auto tfPrefix = getTFPrefix(socketName); + std::string socketName = getSocketName(static_cast(ph->getParam("i_board_socket_id"))); + auto tfPrefix = getOpticalTFPrefix(socketName); int width; int height; if(ph->getParam("i_disable_resize")) { @@ -72,8 +69,8 @@ class Detection : public BaseNode { width = imageManip->initialConfig.getResizeConfig().width; height = imageManip->initialConfig.getResizeConfig().height; } - detConverter = std::make_unique( - tfPrefix + "_camera_optical_frame", width, height, false, ph->getParam("i_get_base_device_timestamp")); + + detConverter = std::make_unique(tfPrefix, width, height, false, ph->getParam("i_get_base_device_timestamp")); 
detConverter->setUpdateRosBaseTimeOnToRosMsg(ph->getParam("i_update_ros_base_time_on_ros_msg")); rclcpp::PublisherOptions options; options.qos_overriding_options = rclcpp::QosOverridingOptions(); @@ -81,20 +78,18 @@ class Detection : public BaseNode { nnQ->addCallback(std::bind(&Detection::detectionCB, this, std::placeholders::_1, std::placeholders::_2)); if(ph->getParam("i_enable_passthrough")) { - ptQ = device->getOutputQueue(ptQName, ph->getParam("i_max_q_size"), false); - imageConverter = std::make_unique(tfPrefix + "_camera_optical_frame", false); - imageConverter->setUpdateRosBaseTimeOnToRosMsg(ph->getParam("i_update_ros_base_time_on_ros_msg")); - infoManager = std::make_shared( - getROSNode()->create_sub_node(std::string(getROSNode()->get_name()) + "/" + getName()).get(), "/" + getName()); - infoManager->setCameraInfo(sensor_helpers::getCalibInfo(getROSNode()->get_logger(), - *imageConverter, - device, - static_cast(ph->getParam("i_board_socket_id")), - width, - height)); + utils::ImgConverterConfig convConf; + convConf.getBaseDeviceTimestamp = ph->getParam("i_get_base_device_timestamp"); + convConf.tfPrefix = tfPrefix; + convConf.updateROSBaseTimeOnRosMsg = ph->getParam("i_update_ros_base_time_on_ros_msg"); + + utils::ImgPublisherConfig pubConf; + pubConf.daiNodeName = getName(); + pubConf.topicName = "~/" + getName(); + pubConf.topicSuffix = "passthrough"; + pubConf.socket = static_cast(ph->getParam("i_board_socket_id")); - ptPub = image_transport::create_camera_publisher(getROSNode(), "~/" + getName() + "/passthrough/image_raw"); - ptQ->addCallback(std::bind(sensor_helpers::basicCameraPub, std::placeholders::_1, std::placeholders::_2, *imageConverter, ptPub, infoManager)); + ptPub->setup(device, convConf, pubConf); } }; /** @@ -134,9 +129,7 @@ class Detection : public BaseNode { xoutNN->setStreamName(nnQName); detectionNode->out.link(xoutNN->input); if(ph->getParam("i_enable_passthrough")) { - xoutPT = pipeline->create(); - xoutPT->setStreamName(ptQName); - detectionNode->passthrough.link(xoutPT->input); + ptPub = setupOutput(pipeline, ptQName, [&](dai::Node::Input input) { detectionNode->passthrough.link(input); }); } }; /** @@ -173,17 +166,15 @@ class Detection : public BaseNode { std::unique_ptr detConverter; std::vector labelNames; rclcpp::Publisher::SharedPtr detPub; - std::unique_ptr imageConverter; - image_transport::CameraPublisher ptPub; - std::shared_ptr infoManager; + std::shared_ptr ptPub; std::shared_ptr detectionNode; std::shared_ptr imageManip; std::unique_ptr ph; std::shared_ptr nnQ, ptQ; - std::shared_ptr xoutNN, xoutPT; + std::shared_ptr xoutNN; std::string nnQName, ptQName; }; } // namespace nn } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/nn_wrapper.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/nn_wrapper.hpp index 6890ab38..cfab6975 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/nn_wrapper.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/nn_wrapper.hpp @@ -27,7 +27,7 @@ namespace dai_nodes { class NNWrapper : public BaseNode { public: explicit NNWrapper(const std::string& daiNodeName, - rclcpp::Node* node, + std::shared_ptr node, std::shared_ptr pipeline, const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A); ~NNWrapper(); @@ -45,4 +45,4 @@ class NNWrapper : public BaseNode { }; } // namespace dai_nodes -} // namespace 
depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/segmentation.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/segmentation.hpp index 97127b9b..4c3903c6 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/segmentation.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/segmentation.hpp @@ -4,7 +4,7 @@ #include #include -#include "cv_bridge/cv_bridge.hpp" +#include "cv_bridge/cv_bridge.h" #include "depthai-shared/common/CameraBoardSocket.hpp" #include "depthai_ros_driver/dai_nodes/base_node.hpp" #include "image_transport/camera_publisher.hpp" @@ -42,7 +42,7 @@ namespace nn { class Segmentation : public BaseNode { public: Segmentation(const std::string& daiNodeName, - rclcpp::Node* node, + std::shared_ptr node, std::shared_ptr pipeline, const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A); ~Segmentation(); @@ -58,7 +58,7 @@ class Segmentation : public BaseNode { cv::Mat decodeDeeplab(cv::Mat mat); void segmentationCB(const std::string& name, const std::shared_ptr& data); std::vector labelNames; - std::unique_ptr imageConverter; + std::shared_ptr imageConverter; std::shared_ptr infoManager; image_transport::CameraPublisher nnPub, ptPub; sensor_msgs::msg::CameraInfo nnInfo; @@ -72,4 +72,4 @@ class Segmentation : public BaseNode { } // namespace nn } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_detection.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_detection.hpp index 18f9bf62..f22d1986 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_detection.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_detection.hpp @@ -16,11 +16,9 @@ #include "depthai_bridge/SpatialDetectionConverter.hpp" #include "depthai_ros_driver/dai_nodes/base_node.hpp" #include "depthai_ros_driver/dai_nodes/nn/nn_helpers.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/img_pub.hpp" #include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" #include "depthai_ros_driver/param_handlers/nn_param_handler.hpp" -#include "depthai_ros_driver/utils.hpp" -#include "image_transport/camera_publisher.hpp" -#include "image_transport/image_transport.hpp" #include "rclcpp/node.hpp" namespace depthai_ros_driver { @@ -30,17 +28,17 @@ template class SpatialDetection : public BaseNode { public: SpatialDetection(const std::string& daiNodeName, - rclcpp::Node* node, + std::shared_ptr node, std::shared_ptr pipeline, const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A) : BaseNode(daiNodeName, node, pipeline) { - RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Creating node %s", daiNodeName.c_str()); setNames(); spatialNode = pipeline->create(); imageManip = pipeline->create(); ph = std::make_unique(node, daiNodeName, socket); ph->declareParams(spatialNode, imageManip); - RCLCPP_DEBUG(node->get_logger(), "Node %s created", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Node %s created", daiNodeName.c_str()); imageManip->out.link(spatialNode->input); setXinXout(pipeline); } @@ -50,8 +48,8 @@ class SpatialDetection : public BaseNode { }; void setupQueues(std::shared_ptr device) override { nnQ = device->getOutputQueue(nnQName, ph->getParam("i_max_q_size"), 
false); - std::string socketName = utils::getSocketName(static_cast(ph->getParam("i_board_socket_id"))); - auto tfPrefix = getTFPrefix(socketName); + std::string socketName = getSocketName(static_cast(ph->getParam("i_board_socket_id"))); + auto tfPrefix = getOpticalTFPrefix(socketName); int width; int height; if(ph->getParam("i_disable_resize")) { @@ -61,8 +59,7 @@ class SpatialDetection : public BaseNode { width = imageManip->initialConfig.getResizeConfig().width; height = imageManip->initialConfig.getResizeConfig().height; } - detConverter = std::make_unique( - tfPrefix + "_camera_optical_frame", width, height, false, ph->getParam("i_get_base_device_timestamp")); + detConverter = std::make_unique(tfPrefix, width, height, false, ph->getParam("i_get_base_device_timestamp")); detConverter->setUpdateRosBaseTimeOnToRosMsg(ph->getParam("i_update_ros_base_time_on_ros_msg")); nnQ->addCallback(std::bind(&SpatialDetection::spatialCB, this, std::placeholders::_1, std::placeholders::_2)); rclcpp::PublisherOptions options; @@ -70,20 +67,20 @@ class SpatialDetection : public BaseNode { detPub = getROSNode()->template create_publisher("~/" + getName() + "/spatial_detections", 10, options); if(ph->getParam("i_enable_passthrough")) { - ptQ = device->getOutputQueue(ptQName, ph->getParam("i_max_q_size"), false); - ptImageConverter = std::make_unique(tfPrefix + "_camera_optical_frame", false); - ptImageConverter->setUpdateRosBaseTimeOnToRosMsg(ph->getParam("i_update_ros_base_time_on_ros_msg")); - ptInfoMan = std::make_shared( - getROSNode()->create_sub_node(std::string(getROSNode()->get_name()) + "/" + getName()).get(), "/" + getName()); - ptInfoMan->setCameraInfo(sensor_helpers::getCalibInfo(getROSNode()->get_logger(), - *ptImageConverter, - device, - static_cast(ph->getParam("i_board_socket_id")), - width, - height)); + utils::ImgConverterConfig convConf; + convConf.tfPrefix = tfPrefix; + convConf.getBaseDeviceTimestamp = ph->getParam("i_get_base_device_timestamp"); + convConf.updateROSBaseTimeOnRosMsg = ph->getParam("i_update_ros_base_time_on_ros_msg"); - ptPub = image_transport::create_camera_publisher(getROSNode(), "~/" + getName() + "/passthrough/image_raw"); - ptQ->addCallback(std::bind(sensor_helpers::basicCameraPub, std::placeholders::_1, std::placeholders::_2, *ptImageConverter, ptPub, ptInfoMan)); + utils::ImgPublisherConfig pubConf; + pubConf.width = width; + pubConf.height = height; + pubConf.daiNodeName = getName(); + pubConf.topicName = "~/" + getName(); + pubConf.topicSuffix = "passthrough"; + pubConf.socket = static_cast(ph->getParam("i_board_socket_id")); + + ptPub->setup(device, convConf, pubConf); } if(ph->getParam("i_enable_passthrough_depth")) { @@ -91,21 +88,20 @@ class SpatialDetection : public BaseNode { if(!ph->getOtherNodeParam("stereo", "i_align_depth")) { tfPrefix = getTFPrefix("right"); }; - ptDepthQ = device->getOutputQueue(ptDepthQName, ph->getParam("i_max_q_size"), false); - ptDepthImageConverter = std::make_unique(tfPrefix + "_camera_optical_frame", false); - ptDepthImageConverter->setUpdateRosBaseTimeOnToRosMsg(ph->getParam("i_update_ros_base_time_on_ros_msg")); - ptDepthInfoMan = std::make_shared( - getROSNode()->create_sub_node(std::string(getROSNode()->get_name()) + "/" + getName()).get(), "/" + getName()); - ptDepthInfoMan->setCameraInfo(sensor_helpers::getCalibInfo(getROSNode()->get_logger(), - *ptDepthImageConverter, - device, - socket, - ph->getOtherNodeParam("stereo", "i_width"), - ph->getOtherNodeParam("stereo", "i_height"))); + utils::ImgConverterConfig convConf; 
+ convConf.tfPrefix = tfPrefix; + convConf.getBaseDeviceTimestamp = ph->getParam("i_get_base_device_timestamp"); + convConf.updateROSBaseTimeOnRosMsg = ph->getParam("i_update_ros_base_time_on_ros_msg"); + + utils::ImgPublisherConfig pubConf; + pubConf.width = ph->getOtherNodeParam(sensor_helpers::getNodeName(getROSNode(), sensor_helpers::NodeNameEnum::Stereo), "i_width"); + pubConf.height = ph->getOtherNodeParam(sensor_helpers::getNodeName(getROSNode(), sensor_helpers::NodeNameEnum::Stereo), "i_height"); + pubConf.daiNodeName = getName(); + pubConf.topicName = "~/" + getName(); + pubConf.topicSuffix = "/passthrough_depth"; + pubConf.socket = socket; - ptDepthPub = image_transport::create_camera_publisher(getROSNode(), "~/" + getName() + "/passthrough_depth/image_raw"); - ptDepthQ->addCallback( - std::bind(sensor_helpers::basicCameraPub, std::placeholders::_1, std::placeholders::_2, *ptDepthImageConverter, ptDepthPub, ptDepthInfoMan)); + ptDepthPub->setup(device, convConf, pubConf); } }; void link(dai::Node::Input in, int /*linkType = 0*/) override { @@ -131,14 +127,10 @@ class SpatialDetection : public BaseNode { xoutNN->setStreamName(nnQName); spatialNode->out.link(xoutNN->input); if(ph->getParam("i_enable_passthrough")) { - xoutPT = pipeline->create(); - xoutPT->setStreamName(ptQName); - spatialNode->passthrough.link(xoutPT->input); + ptPub = setupOutput(pipeline, ptQName, [&](dai::Node::Input input) { spatialNode->passthrough.link(input); }); } if(ph->getParam("i_enable_passthrough_depth")) { - xoutPTDepth = pipeline->create(); - xoutPTDepth->setStreamName(ptDepthQName); - spatialNode->passthroughDepth.link(xoutPTDepth->input); + ptDepthPub = setupOutput(pipeline, ptDepthQName, [&](dai::Node::Input input) { spatialNode->passthroughDepth.link(input); }); } }; void closeQueues() override { @@ -165,9 +157,8 @@ class SpatialDetection : public BaseNode { std::unique_ptr detConverter; std::vector labelNames; rclcpp::Publisher::SharedPtr detPub; - std::unique_ptr ptImageConverter, ptDepthImageConverter; - image_transport::CameraPublisher ptPub, ptDepthPub; - sensor_msgs::msg::CameraInfo ptInfo, ptDepthInfo; + std::shared_ptr ptImageConverter, ptDepthImageConverter; + std::shared_ptr ptPub, ptDepthPub; std::shared_ptr ptInfoMan, ptDepthInfoMan; std::shared_ptr spatialNode; std::shared_ptr imageManip; @@ -179,4 +170,4 @@ class SpatialDetection : public BaseNode { } // namespace nn } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_nn_wrapper.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_nn_wrapper.hpp index dc27c09c..6e555a2d 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_nn_wrapper.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/nn/spatial_nn_wrapper.hpp @@ -28,7 +28,7 @@ namespace dai_nodes { class SpatialNNWrapper : public BaseNode { public: explicit SpatialNNWrapper(const std::string& daiNodeName, - rclcpp::Node* node, + std::shared_ptr node, std::shared_ptr pipeline, const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A); ~SpatialNNWrapper(); @@ -46,4 +46,4 @@ class SpatialNNWrapper : public BaseNode { }; } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/feature_tracker.hpp 
b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/feature_tracker.hpp index 746d6cc5..81e7ef92 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/feature_tracker.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/feature_tracker.hpp @@ -31,7 +31,7 @@ namespace dai_nodes { class FeatureTracker : public BaseNode { public: - explicit FeatureTracker(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr pipeline); + explicit FeatureTracker(const std::string& daiNodeName, std::shared_ptr node, std::shared_ptr pipeline); ~FeatureTracker(); void updateParams(const std::vector& params) override; void setupQueues(std::shared_ptr device) override; @@ -55,4 +55,4 @@ }; } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/img_pub.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/img_pub.hpp new file mode 100644 index 00000000..1fcda95f --- /dev/null +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/img_pub.hpp @@ -0,0 +1,116 @@ +#pragma once + +#include + +#include "depthai/pipeline/Node.hpp" +#include "depthai/pipeline/datatype/ADatatype.hpp" +#include "depthai_ros_driver/utils.hpp" +#include "ffmpeg_image_transport_msgs/msg/ffmpeg_packet.hpp" +#include "image_transport/camera_publisher.hpp" +#include "sensor_msgs/msg/camera_info.hpp" +#include "sensor_msgs/msg/compressed_image.hpp" +#include "sensor_msgs/msg/image.hpp" + +namespace dai { +class Device; +class Pipeline; +class DataOutputQueue; +namespace node { +class VideoEncoder; +class XLinkOut; +} // namespace node +namespace ros { +class ImageConverter; +} +} // namespace dai + +namespace rclcpp { +class Logger; +class Node; +} // namespace rclcpp + +namespace camera_info_manager { +class CameraInfoManager; +} +namespace depthai_ros_driver { +namespace dai_nodes { + +namespace sensor_helpers { +/** + * @brief Image struct + * + * This struct is used to store the image, camera info, FFmpeg packet and compressed image messages. + */ +class Image { + public: + sensor_msgs::msg::Image::UniquePtr image; + sensor_msgs::msg::CameraInfo::UniquePtr info; + ffmpeg_image_transport_msgs::msg::FFMPEGPacket::UniquePtr ffmpegPacket; + sensor_msgs::msg::CompressedImage::UniquePtr compressedImg; +}; +/** + * @brief ImagePublisher class + * + * This class is used to publish images from the device to ROS2. It creates a publisher for the image and camera info topics. + */ +class ImagePublisher { + public: + /** + * @brief Construct a new Image Publisher object + * + * Creates XLinkOut if synced and VideoEncoder if lowBandwidth is enabled. linkFunc is stored and returned when link is called. + */ + ImagePublisher(std::shared_ptr node, + std::shared_ptr pipeline, + const std::string& qName, + std::function linkFunc, + bool synced = false, + bool ipcEnabled = false, + const utils::VideoEncoderConfig& encoderConfig = {}); + + ~ImagePublisher(); + /** + * @brief Setup the image publisher + * + * Creates Publishers, ImageConverter and CameraInfoManager. Creates a Queue and adds a callback if not synced.
+ */ + void setup(std::shared_ptr device, const utils::ImgConverterConfig& convConf, const utils::ImgPublisherConfig& pubConf); + void createImageConverter(std::shared_ptr device); + void createInfoManager(std::shared_ptr device); + void addQueueCB(const std::shared_ptr& queue); + void closeQueue(); + std::shared_ptr getQueue(); + void link(dai::Node::Input in); + std::string getQueueName(); + void publish(const std::shared_ptr& data); + void publish(std::shared_ptr img); + void publish(std::shared_ptr img, rclcpp::Time timestamp); + std::shared_ptr convertData(const std::shared_ptr& data); + std::shared_ptr createEncoder(std::shared_ptr pipeline, const utils::VideoEncoderConfig& encoderConfig); + + private: + bool detectSubscription(const rclcpp::Publisher::SharedPtr& pub, + const rclcpp::Publisher::SharedPtr& infoPub); + std::shared_ptr node; + utils::VideoEncoderConfig encConfig; + utils::ImgPublisherConfig pubConfig; + utils::ImgConverterConfig convConfig; + std::shared_ptr infoManager; + std::shared_ptr converter; + std::shared_ptr xout; + std::shared_ptr encoder; + std::function linkCB; + rclcpp::Publisher::SharedPtr imgPub; + rclcpp::Publisher::SharedPtr infoPub; + rclcpp::Publisher::SharedPtr ffmpegPub; + rclcpp::Publisher::SharedPtr compressedImgPub; + image_transport::CameraPublisher imgPubIT; + std::shared_ptr dataQ; + int cbID; + std::string qName; + bool ipcEnabled; + bool synced; +}; +} // namespace sensor_helpers +} // namespace dai_nodes +} // namespace depthai_ros_driver
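Taken together with the detection and spatial-detection hunks earlier in this diff, the new ImagePublisher collapses the old per-node converter/CameraInfoManager/image_transport boilerplate into a two-step pattern: create the publisher while the pipeline is being built, then configure it once the device is available. A condensed sketch of that sequence as used from a BaseNode subclass; struct and method names come from the headers above, the concrete values are illustrative only:

// 1) In setXinXout(): create the publisher and hand it the DAI link to wire up later.
ptPub = setupOutput(pipeline, ptQName, [&](dai::Node::Input input) { detectionNode->passthrough.link(input); });

// 2) In setupQueues(): describe the conversion and topic layout, then start publishing.
utils::ImgConverterConfig convConf;
convConf.tfPrefix = getOpticalTFPrefix(socketName);
convConf.getBaseDeviceTimestamp = false;     // illustrative value
convConf.updateROSBaseTimeOnRosMsg = false;  // illustrative value

utils::ImgPublisherConfig pubConf;
pubConf.daiNodeName = getName();
pubConf.topicName = "~/" + getName();
pubConf.topicSuffix = "passthrough";
pubConf.socket = socket;

// Creates the queue, ImageConverter, CameraInfoManager and ROS publishers in one call.
ptPub->setup(device, convConf, pubConf);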
diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/imu.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/imu.hpp index 6d9c851c..121b97a1 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/imu.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/imu.hpp @@ -33,7 +33,10 @@ namespace dai_nodes { class Imu : public BaseNode { public: - explicit Imu(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr pipeline, std::shared_ptr device); + explicit Imu(const std::string& daiNodeName, + std::shared_ptr node, + std::shared_ptr pipeline, + std::shared_ptr device); ~Imu(); void updateParams(const std::vector& params) override; void setupQueues(std::shared_ptr device) override; @@ -58,4 +61,4 @@ }; } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/mono.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/mono.hpp index 5621dada..5a307f18 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/mono.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/mono.hpp @@ -1,27 +1,16 @@ #pragma once #include "depthai_ros_driver/dai_nodes/base_node.hpp" -#include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" -#include "image_transport/camera_publisher.hpp" -#include "image_transport/image_transport.hpp" -#include "sensor_msgs/msg/camera_info.hpp" -#include "sensor_msgs/msg/image.hpp" namespace dai { class Pipeline; class Device; -class DataOutputQueue; class DataInputQueue; class ADatatype; namespace node { class MonoCamera; class XLinkIn; -class XLinkOut; -class VideoEncoder; } // namespace node -namespace ros { -class ImageConverter; -} } // namespace dai namespace rclcpp { @@ -29,20 +18,20 @@ class Node; class Parameter; } // namespace rclcpp -namespace camera_info_manager { -class CameraInfoManager; -} - namespace depthai_ros_driver { namespace param_handlers { class SensorParamHandler; } namespace dai_nodes { +namespace sensor_helpers { +struct ImageSensor; +class ImagePublisher; +} // namespace sensor_helpers class Mono : public BaseNode { public: explicit Mono(const std::string& daiNodeName, - rclcpp::Node* node, + std::shared_ptr node, std::shared_ptr pipeline, dai::CameraBoardSocket socket, sensor_helpers::ImageSensor sensor, @@ -54,22 +43,16 @@ void setNames() override; void setXinXout(std::shared_ptr pipeline) override; void closeQueues() override; + std::vector> getPublishers() override; private: - std::unique_ptr imageConverter; - image_transport::CameraPublisher monoPubIT; - rclcpp::Publisher::SharedPtr monoPub; - rclcpp::Publisher::SharedPtr infoPub; - std::shared_ptr infoManager; + std::shared_ptr imagePublisher; std::shared_ptr monoCamNode; - std::shared_ptr videoEnc; std::unique_ptr ph; - std::shared_ptr monoQ; std::shared_ptr controlQ; - std::shared_ptr xoutMono; std::shared_ptr xinControl; std::string monoQName, controlQName; }; } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/rgb.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/rgb.hpp index 864a993e..b3f8e4aa 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/rgb.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/rgb.hpp @@ -1,27 +1,17 @@ #pragma once #include "depthai_ros_driver/dai_nodes/base_node.hpp" -#include "image_transport/camera_publisher.hpp" -#include "image_transport/image_transport.hpp" -#include "sensor_msgs/msg/camera_info.hpp" -#include "sensor_msgs/msg/image.hpp" namespace dai { class Pipeline; class Device; -class DataOutputQueue; class DataInputQueue; enum class CameraBoardSocket; class ADatatype; namespace node { class ColorCamera; class XLinkIn; -class XLinkOut; -class VideoEncoder; } // namespace node -namespace ros { -class ImageConverter; -} } // namespace dai namespace rclcpp { @@ -29,10 +19,6 @@ class Node; class Parameter; } // namespace rclcpp -namespace camera_info_manager { -class CameraInfoManager; -} - namespace depthai_ros_driver { namespace param_handlers { class SensorParamHandler; @@ -41,12 +27,13 @@ namespace dai_nodes { namespace sensor_helpers { struct ImageSensor; -} +class ImagePublisher; +} // namespace sensor_helpers class RGB : public BaseNode { public: explicit RGB(const std::string& daiNodeName, - rclcpp::Node* node, + std::shared_ptr node, std::shared_ptr pipeline, dai::CameraBoardSocket socket, sensor_helpers::ImageSensor sensor, @@ -58,22 +45,16 @@ void setNames() override; void setXinXout(std::shared_ptr pipeline) override; void closeQueues() override; + std::vector> getPublishers() override; private: - std::unique_ptr imageConverter; - image_transport::CameraPublisher rgbPubIT, previewPubIT; - rclcpp::Publisher::SharedPtr rgbPub, previewPub; - rclcpp::Publisher::SharedPtr rgbInfoPub, previewInfoPub; - std::shared_ptr infoManager, previewInfoManager; + std::shared_ptr rgbPub, previewPub; std::shared_ptr colorCamNode; - std::shared_ptr videoEnc; std::unique_ptr ph; - std::shared_ptr colorQ, previewQ; std::shared_ptr controlQ; - std::shared_ptr xoutColor, xoutPreview; std::shared_ptr xinControl; std::string ispQName, previewQName, controlQName; }; } // namespace
dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp index 3066088c..d468e749 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp @@ -1,24 +1,25 @@ #pragma once -#include #include #include -#include "depthai-shared/datatype/RawImgFrame.hpp" #include "depthai-shared/properties/ColorCameraProperties.hpp" #include "depthai-shared/properties/MonoCameraProperties.hpp" -#include "depthai-shared/properties/VideoEncoderProperties.hpp" +#include "depthai/pipeline/Node.hpp" #include "depthai/pipeline/datatype/ADatatype.hpp" #include "depthai/pipeline/datatype/CameraControl.hpp" #include "image_transport/camera_publisher.hpp" #include "sensor_msgs/msg/camera_info.hpp" +#include "sensor_msgs/msg/image.hpp" namespace dai { class Device; class Pipeline; +class DataOutputQueue; namespace node { class VideoEncoder; -} +class XLinkOut; +} // namespace node namespace ros { class ImageConverter; } @@ -38,6 +39,7 @@ namespace link_types { enum class RGBLinkType { video, isp, preview }; }; namespace sensor_helpers { +enum class NodeNameEnum { RGB, Left, Right, Stereo, IMU, NN }; struct ImageSensor { std::string name; std::string defaultResolution; @@ -46,42 +48,28 @@ struct ImageSensor { }; extern std::vector availableSensors; extern const std::unordered_map socketNameMap; +extern const std::unordered_map rsSocketNameMap; +extern const std::unordered_map rsNodeNameMap; +extern const std::unordered_map NodeNameMap; extern const std::unordered_map monoResolutionMap; extern const std::unordered_map rgbResolutionMap; extern const std::unordered_map fSyncModeMap; extern const std::unordered_map cameraImageOrientationMap; +bool rsCompabilityMode(std::shared_ptr node); +std::string getSocketName(std::shared_ptr node, dai::CameraBoardSocket socket); +std::string getNodeName(std::shared_ptr node, NodeNameEnum name); void basicCameraPub(const std::string& /*name*/, const std::shared_ptr& data, dai::ros::ImageConverter& converter, image_transport::CameraPublisher& pub, std::shared_ptr infoManager); -void cameraPub(const std::string& /*name*/, - const std::shared_ptr& data, - dai::ros::ImageConverter& converter, - image_transport::CameraPublisher& pub, - std::shared_ptr infoManager, - bool lazyPub = true); - -void splitPub(const std::string& /*name*/, - const std::shared_ptr& data, - dai::ros::ImageConverter& converter, - rclcpp::Publisher::SharedPtr imgPub, - rclcpp::Publisher::SharedPtr infoPub, - std::shared_ptr infoManager, - bool lazyPub = true); - sensor_msgs::msg::CameraInfo getCalibInfo(const rclcpp::Logger& logger, - dai::ros::ImageConverter& converter, + std::shared_ptr converter, std::shared_ptr device, dai::CameraBoardSocket socket, int width = 0, int height = 0); -std::shared_ptr createEncoder(std::shared_ptr pipeline, - int quality, - dai::VideoEncoderProperties::Profile profile = dai::VideoEncoderProperties::Profile::MJPEG); -bool detectSubscription(const rclcpp::Publisher::SharedPtr& pub, - const rclcpp::Publisher::SharedPtr& infoPub); } // namespace sensor_helpers } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git 
a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sensor_wrapper.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sensor_wrapper.hpp index 792e763b..e5ad89aa 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sensor_wrapper.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sensor_wrapper.hpp @@ -37,7 +37,7 @@ namespace dai_nodes { class SensorWrapper : public BaseNode { public: explicit SensorWrapper(const std::string& daiNodeName, - rclcpp::Node* node, + std::shared_ptr node, std::shared_ptr pipeline, std::shared_ptr device, dai::CameraBoardSocket socket, @@ -50,6 +50,7 @@ void setXinXout(std::shared_ptr pipeline) override; void closeQueues() override; sensor_helpers::ImageSensor getSensorData(); + std::vector> getPublishers() override; private: void subCB(const sensor_msgs::msg::Image& img); @@ -65,4 +66,4 @@ }; } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/stereo.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/stereo.hpp new file mode 100644 index 00000000..59d641bb --- /dev/null +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/stereo.hpp @@ -0,0 +1,86 @@ +#pragma once + +#include +#include +#include + +#include "depthai-shared/common/CameraBoardSocket.hpp" +#include "depthai-shared/common/CameraFeatures.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/sensor_wrapper.hpp" + +namespace dai { +class Pipeline; +class Device; +class DataOutputQueue; +class ADatatype; +class ImgFrame; +namespace node { +class StereoDepth; +class XLinkOut; +class VideoEncoder; +} // namespace node +} // namespace dai + +namespace rclcpp { +class Node; +class Parameter; +} // namespace rclcpp + +namespace depthai_ros_driver { +namespace param_handlers { +class StereoParamHandler; +} + +namespace dai_nodes { +namespace link_types { +enum class StereoLinkType { stereo, left, right }; +}; + +namespace sensor_helpers { +class ImagePublisher; +} +class Stereo : public BaseNode { + public: + explicit Stereo(const std::string& daiNodeName, + std::shared_ptr node, + std::shared_ptr pipeline, + std::shared_ptr device, + dai::CameraBoardSocket leftSocket = dai::CameraBoardSocket::CAM_B, + dai::CameraBoardSocket rightSocket = dai::CameraBoardSocket::CAM_C); + ~Stereo(); + void updateParams(const std::vector& params) override; + void setupQueues(std::shared_ptr device) override; + void link(dai::Node::Input in, int linkType = 1) override; + dai::Node::Input getInput(int linkType = 0) override; + void setNames() override; + void setXinXout(std::shared_ptr pipeline) override; + void closeQueues() override; + std::vector> getPublishers() override; + + private: + void setupStereoQueue(std::shared_ptr device); + void setupLeftRectQueue(std::shared_ptr device); + void setupRightRectQueue(std::shared_ptr device); + void setupRectQueue(std::shared_ptr device, dai::CameraFeatures& sensorInfo, std::shared_ptr pub, bool isLeft); + /* + * This callback is used to synchronize left and right rectified frames. + * It is called every 10 ms and publishes the frames if they are synchronized. + * If they are not synchronized, it prints a warning message. + */ + void syncTimerCB(); + std::shared_ptr stereoPub, leftRectPub, rightRectPub; + std::shared_ptr stereoCamNode; +
std::shared_ptr stereoEnc, leftRectEnc, rightRectEnc; + std::unique_ptr left; + std::unique_ptr right; + std::unique_ptr featureTrackerLeftR, featureTrackerRightR, nnNode; + std::unique_ptr ph; + std::shared_ptr leftRectQ, rightRectQ; + std::shared_ptr xoutStereo, xoutLeftRect, xoutRightRect; + std::string stereoQName, leftRectQName, rightRectQName; + dai::CameraFeatures leftSensInfo, rightSensInfo; + rclcpp::TimerBase::SharedPtr syncTimer; +}; + +} // namespace dai_nodes +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sync.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sync.hpp new file mode 100644 index 00000000..947c14f1 --- /dev/null +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/sync.hpp @@ -0,0 +1,55 @@ + +#pragma once + +#include +#include +#include + +#include "depthai_ros_driver/dai_nodes/base_node.hpp" + +namespace dai { +class Pipeline; +class Device; +class DataOutputQueue; +class ADatatype; +class ImgFrame; +namespace node { +class Sync; +class XLinkOut; +} // namespace node +} // namespace dai + +namespace rclcpp { +class Node; +class Parameter; +} // namespace rclcpp + +namespace depthai_ros_driver { +namespace param_handlers { +class SyncParamHandler; +} +namespace dai_nodes { +class Sync : public BaseNode { + public: + explicit Sync(const std::string& daiNodeName, std::shared_ptr node, std::shared_ptr pipeline); + ~Sync(); + void setupQueues(std::shared_ptr device) override; + void link(dai::Node::Input in, int linkType = 0) override; + dai::Node::Input getInputByName(const std::string& name = "") override; + void setNames() override; + void setXinXout(std::shared_ptr pipeline) override; + void closeQueues() override; + void addPublishers(const std::vector>& pubs); + + private: + std::unique_ptr paramHandler; + std::shared_ptr syncNode; + std::string syncOutputName; + std::shared_ptr xoutFrame; + std::shared_ptr outQueue; + void publishOutputs(); + std::vector> publishers; + std::vector syncNames; +}; +} // namespace dai_nodes +} // namespace depthai_ros_driver
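The syncTimerCB() comment in the stereo.hpp hunk above describes the host-side pairing of the two rectified streams. A rough sketch of what such a check can look like; the sequence-number criterion and the publish() overload used here are assumptions based only on the headers in this diff (DataOutputQueue::get<T>() and ImgFrame::getSequenceNum() are real depthai-core calls):

void Stereo::syncTimerCB() {
    // Blocking fetch of one frame per side; a real implementation may use tryGet().
    auto left = leftRectQ->get<dai::ImgFrame>();
    auto right = rightRectQ->get<dai::ImgFrame>();
    if(left->getSequenceNum() == right->getSequenceNum()) {
        // In sync: hand each frame to its ImagePublisher.
        leftRectPub->publish(left);
        rightRectPub->publish(right);
    } else {
        RCLCPP_WARN(getLogger(), "Left and right rectified frames out of sync, skipping pair");
    }
}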
diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/tof.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/tof.hpp new file mode 100644 index 00000000..e6c368a7 --- /dev/null +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sensors/tof.hpp @@ -0,0 +1,60 @@ +#pragma once + +#include "depthai_ros_driver/dai_nodes/base_node.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" + +namespace dai { +class Pipeline; +class Device; +class DataInputQueue; +class ADatatype; +namespace node { +class Camera; +class ToF; +class ImageAlign; +class XLinkIn; +} // namespace node +} // namespace dai + +namespace rclcpp { +class Node; +class Parameter; +} // namespace rclcpp + +namespace depthai_ros_driver { +namespace param_handlers { +class ToFParamHandler; +} +namespace dai_nodes { + +namespace sensor_helpers { +class ImagePublisher; +} // namespace sensor_helpers +class ToF : public BaseNode { + public: + explicit ToF(const std::string& daiNodeName, + std::shared_ptr node, + std::shared_ptr pipeline, + dai::CameraBoardSocket boardSocket = dai::CameraBoardSocket::CAM_A); + ~ToF(); + void updateParams(const std::vector& params) override; + void setupQueues(std::shared_ptr device) override; + void link(dai::Node::Input in, int linkType = 0) override; + dai::Node::Input getInput(int linkType = 0) override; + void setNames() override; + void setXinXout(std::shared_ptr pipeline) override; + std::vector> getPublishers() override; + void closeQueues() override; + + private: + std::shared_ptr tofPub; + std::shared_ptr camNode; + std::shared_ptr tofNode; + std::shared_ptr alignNode; + std::unique_ptr ph; + dai::CameraBoardSocket boardSocket; + std::string tofQName; +}; + +} // namespace dai_nodes +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sys_logger.hpp b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sys_logger.hpp index ccebcc4a..31706cb0 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sys_logger.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/dai_nodes/sys_logger.hpp @@ -22,7 +22,7 @@ namespace depthai_ros_driver { namespace dai_nodes { class SysLogger : public BaseNode { public: - SysLogger(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr pipeline); + SysLogger(const std::string& daiNodeName, std::shared_ptr node, std::shared_ptr pipeline); ~SysLogger(); void setupQueues(std::shared_ptr device) override; void setNames() override; @@ -39,4 +39,4 @@ std::string loggerQName; }; } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/base_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/base_param_handler.hpp index 1faac56c..6fc5d88c 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/base_param_handler.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/base_param_handler.hpp @@ -1,5 +1,8 @@ #pragma once +#include + #include "depthai/pipeline/datatype/CameraControl.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" #include "rcl_interfaces/msg/parameter_descriptor.hpp" #include "rclcpp/node.hpp" namespace depthai_ros_driver { @@ -15,24 +18,27 @@ inline rcl_interfaces::msg::ParameterDescriptor getRangedIntDescriptor(uint16_t } class BaseParamHandler { public: - BaseParamHandler(rclcpp::Node* node, const std::string& name) { - baseName = name; - baseNode = node; - }; + BaseParamHandler(std::shared_ptr node, const std::string& name) : baseName(name), baseNode(node){}; virtual ~BaseParamHandler() = default; virtual dai::CameraControl setRuntimeParams(const std::vector& params) = 0; std::string getName() { return baseName; } template - T getParam(const std::string paramName) { + T getParam(const std::string& paramName) { T value; + if(!baseNode->has_parameter(getFullParamName(paramName))) { + RCLCPP_WARN(baseNode->get_logger(), "Parameter %s not found", getFullParamName(paramName).c_str()); + } baseNode->get_parameter(getFullParamName(paramName), value); return value; } template T getOtherNodeParam(const std::string& daiNodeName, const std::string& paramName) { T value; + if(!baseNode->has_parameter(getFullParamName(daiNodeName, paramName))) { + RCLCPP_WARN(baseNode->get_logger(), "Parameter %s not found", getFullParamName(daiNodeName, paramName).c_str()); + } baseNode->get_parameter(getFullParamName(daiNodeName, paramName), value); return value; } @@ -45,8 +51,12 @@ class BaseParamHandler { return name; } + std::string getSocketName(dai::CameraBoardSocket socket) { + return dai_nodes::sensor_helpers::getSocketName(getROSNode(), socket); + } + protected: - rclcpp::Node* getROSNode() { + std::shared_ptr getROSNode() { return baseNode; } template @@
-110,7 +120,7 @@ class BaseParamHandler { RCLCPP_DEBUG(baseNode->get_logger(), "Setting param %s with value %s", name.c_str(), ss.str().c_str()); } std::string baseName; - rclcpp::Node* baseNode; + std::shared_ptr baseNode; }; } // namespace param_handlers } // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/camera_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/camera_param_handler.hpp index 0a18c800..ffb56825 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/camera_param_handler.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/camera_param_handler.hpp @@ -19,7 +19,7 @@ namespace param_handlers { class CameraParamHandler : public BaseParamHandler { public: - explicit CameraParamHandler(rclcpp::Node* node, const std::string& name); + explicit CameraParamHandler(std::shared_ptr node, const std::string& name); ~CameraParamHandler(); void declareParams(); dai::CameraControl setRuntimeParams(const std::vector& params) override; @@ -29,4 +29,4 @@ class CameraParamHandler : public BaseParamHandler { std::unordered_map usbSpeedMap; }; } // namespace param_handlers -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/feature_tracker_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/feature_tracker_param_handler.hpp index 14c8a9ab..605c550b 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/feature_tracker_param_handler.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/feature_tracker_param_handler.hpp @@ -24,11 +24,11 @@ namespace param_handlers { class FeatureTrackerParamHandler : public BaseParamHandler { public: - explicit FeatureTrackerParamHandler(rclcpp::Node* node, const std::string& name); + explicit FeatureTrackerParamHandler(std::shared_ptr node, const std::string& name); ~FeatureTrackerParamHandler(); void declareParams(std::shared_ptr featureTracker); dai::CameraControl setRuntimeParams(const std::vector& params) override; std::unordered_map motionEstMap; }; } // namespace param_handlers -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/imu_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/imu_param_handler.hpp index d6836fc6..be882eed 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/imu_param_handler.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/imu_param_handler.hpp @@ -6,7 +6,6 @@ #include "depthai-shared/properties/IMUProperties.hpp" #include "depthai/pipeline/datatype/CameraControl.hpp" -#include "depthai-shared/properties/IMUProperties.hpp" #include "depthai_bridge/ImuConverter.hpp" #include "depthai_ros_driver/param_handlers/base_param_handler.hpp" @@ -28,7 +27,7 @@ enum class ImuMsgType { IMU, IMU_WITH_MAG, IMU_WITH_MAG_SPLIT }; } class ImuParamHandler : public BaseParamHandler { public: - explicit ImuParamHandler(rclcpp::Node* node, const std::string& name); + explicit ImuParamHandler(std::shared_ptr node, const std::string& name); ~ImuParamHandler(); void declareParams(std::shared_ptr imu, const std::string& imuType); dai::CameraControl setRuntimeParams(const std::vector& params) override; @@ -39,4 +38,4 @@ class ImuParamHandler : public BaseParamHandler 
{ dai::ros::ImuSyncMethod getSyncMethod(); }; } // namespace param_handlers -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/nn_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/nn_param_handler.hpp index a4fc36b6..40656c1b 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/nn_param_handler.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/nn_param_handler.hpp @@ -34,7 +34,7 @@ enum class NNFamily { Segmentation, Mobilenet, Yolo }; } class NNParamHandler : public BaseParamHandler { public: - explicit NNParamHandler(rclcpp::Node* node, const std::string& name, const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A); + explicit NNParamHandler(std::shared_ptr node, const std::string& name, const dai::CameraBoardSocket& socket = dai::CameraBoardSocket::CAM_A); ~NNParamHandler(); nn::NNFamily getNNFamily(); template @@ -125,4 +125,4 @@ class NNParamHandler : public BaseParamHandler { std::unordered_map nnFamilyMap; }; } // namespace param_handlers -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/pipeline_gen_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/pipeline_gen_param_handler.hpp new file mode 100644 index 00000000..e44c4a5d --- /dev/null +++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/pipeline_gen_param_handler.hpp @@ -0,0 +1,29 @@ + +#pragma once + +#include +#include + +#include "depthai_ros_driver/param_handlers/base_param_handler.hpp" + +namespace dai { +enum class UsbSpeed; +} + +namespace rclcpp { +class Node; +class Parameter; +} // namespace rclcpp + +namespace depthai_ros_driver { +namespace param_handlers { + +class PipelineGenParamHandler : public BaseParamHandler { + public: + explicit PipelineGenParamHandler(std::shared_ptr node, const std::string& name); + ~PipelineGenParamHandler(); + void declareParams(); + dai::CameraControl setRuntimeParams(const std::vector& params) override; +}; +} // namespace param_handlers +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/sensor_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/sensor_param_handler.hpp index 4907c57d..ab63dc2e 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/sensor_param_handler.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/sensor_param_handler.hpp @@ -24,7 +24,7 @@ namespace depthai_ros_driver { namespace param_handlers { class SensorParamHandler : public BaseParamHandler { public: - explicit SensorParamHandler(rclcpp::Node* node, const std::string& name, dai::CameraBoardSocket socket); + explicit SensorParamHandler(std::shared_ptr node, const std::string& name, dai::CameraBoardSocket socket); ~SensorParamHandler(); void declareCommonParams(dai::CameraBoardSocket socket); void declareParams(std::shared_ptr monoCam, dai_nodes::sensor_helpers::ImageSensor sensor, bool publish); @@ -35,4 +35,4 @@ class SensorParamHandler : public BaseParamHandler { dai::CameraBoardSocket socketID; }; } // namespace param_handlers -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git 
a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/stereo_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/stereo_param_handler.hpp index f2546e70..6aa583e3 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/stereo_param_handler.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/stereo_param_handler.hpp @@ -22,7 +22,7 @@ namespace depthai_ros_driver { namespace param_handlers { class StereoParamHandler : public BaseParamHandler { public: - explicit StereoParamHandler(rclcpp::Node* node, const std::string& name); + explicit StereoParamHandler(std::shared_ptr node, const std::string& name); ~StereoParamHandler(); void declareParams(std::shared_ptr stereo); dai::CameraControl setRuntimeParams(const std::vector& params) override; @@ -36,4 +36,4 @@ class StereoParamHandler : public BaseParamHandler { dai::CameraBoardSocket alignSocket; }; } // namespace param_handlers -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/sync_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/sync_param_handler.hpp new file mode 100644 index 00000000..50bcee39 --- /dev/null +++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/sync_param_handler.hpp @@ -0,0 +1,32 @@ + +#pragma once + +#include +#include +#include + +#include "depthai/pipeline/datatype/CameraControl.hpp" +#include "depthai_ros_driver/param_handlers/base_param_handler.hpp" + +namespace dai { +namespace node { +class Sync; +} +} // namespace dai + +namespace rclcpp { +class Node; +class Parameter; +} // namespace rclcpp + +namespace depthai_ros_driver { +namespace param_handlers { +class SyncParamHandler : public BaseParamHandler { + public: + explicit SyncParamHandler(std::shared_ptr node, const std::string& name); + ~SyncParamHandler(); + void declareParams(std::shared_ptr sync); + dai::CameraControl setRuntimeParams(const std::vector& params) override; +}; +} // namespace param_handlers +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/param_handlers/tof_param_handler.hpp b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/tof_param_handler.hpp new file mode 100644 index 00000000..add5a6c1 --- /dev/null +++ b/depthai_ros_driver/include/depthai_ros_driver/param_handlers/tof_param_handler.hpp @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include + +#include "depthai/pipeline/datatype/CameraControl.hpp" +#include "depthai/pipeline/datatype/ToFConfig.hpp" +#include "depthai_ros_driver/param_handlers/base_param_handler.hpp" + +namespace dai { +namespace node { +class Camera; +class ToF; +} // namespace node +} // namespace dai + +namespace rclcpp { +class Node; +class Parameter; +} // namespace rclcpp + +namespace depthai_ros_driver { +namespace param_handlers { + +class ToFParamHandler : public BaseParamHandler { + public: + explicit ToFParamHandler(std::shared_ptr node, const std::string& name); + ~ToFParamHandler(); + void declareParams(std::shared_ptr cam, std::shared_ptr tof); + dai::CameraControl setRuntimeParams(const std::vector& params) override; + std::unordered_map medianFilterMap; +}; +} // namespace param_handlers +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/pipeline/base_pipeline.hpp b/depthai_ros_driver/include/depthai_ros_driver/pipeline/base_pipeline.hpp index ffce5152..996c6182 
100644 --- a/depthai_ros_driver/include/depthai_ros_driver/pipeline/base_pipeline.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/pipeline/base_pipeline.hpp @@ -10,7 +10,7 @@ #include "depthai_ros_driver/dai_nodes/nn/nn_wrapper.hpp" #include "depthai_ros_driver/dai_nodes/nn/spatial_nn_wrapper.hpp" #include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" -#include "depthai_ros_driver/dai_nodes/stereo.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/stereo.hpp" namespace dai { class Pipeline; @@ -27,23 +27,25 @@ enum class NNType { None, RGB, Spatial }; class BasePipeline { public: ~BasePipeline() = default; - std::unique_ptr createNN(rclcpp::Node* node, std::shared_ptr pipeline, dai_nodes::BaseNode& daiNode) { - auto nn = std::make_unique("nn", node, pipeline); + std::unique_ptr createNN(std::shared_ptr node, std::shared_ptr pipeline, dai_nodes::BaseNode& daiNode) { + using namespace dai_nodes::sensor_helpers; + auto nn = std::make_unique(getNodeName(node, NodeNameEnum::NN), node, pipeline); daiNode.link(nn->getInput(), static_cast(dai_nodes::link_types::RGBLinkType::preview)); return nn; } - std::unique_ptr createSpatialNN(rclcpp::Node* node, + std::unique_ptr createSpatialNN(std::shared_ptr node, std::shared_ptr pipeline, dai_nodes::BaseNode& daiNode, dai_nodes::BaseNode& daiStereoNode) { - auto nn = std::make_unique("nn", node, pipeline); + using namespace dai_nodes::sensor_helpers; + auto nn = std::make_unique(getNodeName(node, NodeNameEnum::NN), node, pipeline); daiNode.link(nn->getInput(static_cast(dai_nodes::nn_helpers::link_types::SpatialNNLinkType::input)), static_cast(dai_nodes::link_types::RGBLinkType::preview)); daiStereoNode.link(nn->getInput(static_cast(dai_nodes::nn_helpers::link_types::SpatialNNLinkType::inputDepth))); return nn; } - virtual std::vector> createPipeline(rclcpp::Node* node, + virtual std::vector> createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& nnType) = 0; @@ -58,4 +60,4 @@ class BasePipeline { }; }; } // namespace pipeline_gen -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/pipeline/base_types.hpp b/depthai_ros_driver/include/depthai_ros_driver/pipeline/base_types.hpp index a78c6333..c7c74d01 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/pipeline/base_types.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/pipeline/base_types.hpp @@ -21,45 +21,73 @@ namespace depthai_ros_driver { namespace pipeline_gen { class RGB : public BasePipeline { public: - std::vector> createPipeline(rclcpp::Node* node, + std::vector> createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& nnType) override; }; class RGBD : public BasePipeline { public: - std::vector> createPipeline(rclcpp::Node* node, + std::vector> createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& nnType) override; }; class RGBStereo : public BasePipeline { public: - std::vector> createPipeline(rclcpp::Node* node, + std::vector> createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& nnType) override; }; class Stereo : public BasePipeline { public: - std::vector> createPipeline(rclcpp::Node* node, + std::vector> createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& nnType) override; }; class Depth : public 
BasePipeline { public: - std::vector> createPipeline(rclcpp::Node* node, + std::vector> createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& nnType) override; }; class CamArray : public BasePipeline { public: - std::vector> createPipeline(rclcpp::Node* node, + std::vector> createPipeline(std::shared_ptr node, + std::shared_ptr device, + std::shared_ptr pipeline, + const std::string& nnType) override; +}; +class DepthToF : public BasePipeline { + public: + std::vector> createPipeline(std::shared_ptr node, + std::shared_ptr device, + std::shared_ptr pipeline, + const std::string& nnType) override; +}; +class StereoToF : public BasePipeline { + public: + std::vector> createPipeline(std::shared_ptr node, + std::shared_ptr device, + std::shared_ptr pipeline, + const std::string& nnType) override; +}; +class ToF : public BasePipeline { + public: + std::vector> createPipeline(std::shared_ptr node, + std::shared_ptr device, + std::shared_ptr pipeline, + const std::string& nnType) override; +}; +class RGBToF : public BasePipeline { + public: + std::vector> createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& nnType) override; }; } // namespace pipeline_gen -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/pipeline/pipeline_generator.hpp b/depthai_ros_driver/include/depthai_ros_driver/pipeline/pipeline_generator.hpp index 986e7d74..6a893455 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/pipeline/pipeline_generator.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/pipeline/pipeline_generator.hpp @@ -17,12 +17,16 @@ class Node; } namespace depthai_ros_driver { +namespace param_handlers { +class PipelineGenParamHandler; +} // namespace param_handlers namespace pipeline_gen { -enum class PipelineType { RGB, RGBD, RGBStereo, Stereo, Depth, CamArray }; +enum class PipelineType { RGB, RGBD, RGBStereo, Stereo, Depth, CamArray, DepthToF, StereoToF, ToF, RGBToF }; class PipelineGenerator { public: - ~PipelineGenerator() = default; + PipelineGenerator(); + ~PipelineGenerator(); /** * @brief Validates the pipeline type. If the pipeline type is not valid for the number of sensors, it will be changed to the default type. * @@ -32,7 +36,7 @@ class PipelineGenerator { * * @return The validated pipeline type. */ - std::string validatePipeline(rclcpp::Node* node, const std::string& typeStr, int sensorNum); + std::string validatePipeline(std::shared_ptr node, const std::string& typeStr, int sensorNum, const std::string& deviceName); /** * @brief Creates the pipeline by using a plugin. Plugin types need to be of type depthai_ros_driver::pipeline_gen::BasePipeline. * @@ -45,26 +49,18 @@ class PipelineGenerator { * * @return Vector BaseNodes created. 
*/ - std::vector<std::unique_ptr<dai_nodes::BaseNode>> createPipeline(rclcpp::Node* node, + std::vector<std::unique_ptr<dai_nodes::BaseNode>> createPipeline(std::shared_ptr<rclcpp::Node> node, std::shared_ptr<dai::Device> device, std::shared_ptr<dai::Pipeline> pipeline, const std::string& pipelineType, - const std::string& nnType, - bool enableImu); + const std::string& nnType); protected: - std::unordered_map<std::string, std::string> pluginTypeMap{{"RGB", "depthai_ros_driver::pipeline_gen::RGB"}, - {"RGBD", "depthai_ros_driver::pipeline_gen::RGBD"}, - {"RGBSTEREO", "depthai_ros_driver::pipeline_gen::RGBStereo"}, - {"STEREO", "depthai_ros_driver::pipeline_gen::Stereo"}, - {"DEPTH", "depthai_ros_driver::pipeline_gen::Depth"}, - {"CAMARRAY", "depthai_ros_driver::pipeline_gen::CamArray"}}; - std::unordered_map<std::string, PipelineType> pipelineTypeMap{{"RGB", PipelineType::RGB}, - {"RGBD", PipelineType::RGBD}, - {"RGBSTEREO", PipelineType::RGBStereo}, - {"STEREO", PipelineType::Stereo}, - {"DEPTH", PipelineType::Depth}, - {"CAMARRAY", PipelineType::CamArray}}; + std::unordered_map<std::string, std::string> pluginTypeMap; + std::unordered_map<std::string, PipelineType> pipelineTypeMap; + + private: + std::unique_ptr<param_handlers::PipelineGenParamHandler> ph; }; } // namespace pipeline_gen -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/include/depthai_ros_driver/utils.hpp b/depthai_ros_driver/include/depthai_ros_driver/utils.hpp index a98b4483..c83444d7 100644 --- a/depthai_ros_driver/include/depthai_ros_driver/utils.hpp +++ b/depthai_ros_driver/include/depthai_ros_driver/utils.hpp @@ -1,6 +1,5 @@ #pragma once -#include #include #include #include @@ -11,6 +10,18 @@ namespace dai { enum class CameraBoardSocket; } // namespace dai +#include "depthai-shared/common/CameraBoardSocket.hpp" +#include "depthai-shared/datatype/RawImgFrame.hpp" +#include "depthai-shared/properties/VideoEncoderProperties.hpp" +#include "depthai/common/CameraExposureOffset.hpp" + +namespace dai { +class Pipeline; +namespace node { +class XLinkOut; +} // namespace node +} // namespace dai + namespace depthai_ros_driver { namespace utils { template <typename T> @@ -28,6 +39,47 @@ T getValFromMap(const std::string& name, const std::unordered_map<std::string, T>& map); std::shared_ptr<dai::node::XLinkOut> setupXout(std::shared_ptr<dai::Pipeline> pipeline, const std::string& name); } // namespace utils -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/launch/calibration.launch.py b/depthai_ros_driver/launch/calibration.launch.py index fc2e4540..a8b474c1 100644 --- a/depthai_ros_driver/launch/calibration.launch.py +++ b/depthai_ros_driver/launch/calibration.launch.py @@ -2,7 +2,11 @@ from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription -from launch.actions import DeclareLaunchArgument, OpaqueFunction, IncludeLaunchDescription +from launch.actions import ( + DeclareLaunchArgument, + IncludeLaunchDescription, + OpaqueFunction, +) from launch.launch_description_sources import PythonLaunchDescriptionSource from launch.substitutions import LaunchConfiguration from launch_ros.actions import Node @@ -13,25 +17,39 @@ def launch_setup(context, *args, **kwargs): params_file = LaunchConfiguration("params_file") name = LaunchConfiguration("name").perform(context) type = LaunchConfiguration("type").perform(context) - print("#########") - print(type) size = LaunchConfiguration("size").perform(context) square = LaunchConfiguration("square").perform(context) - mono_args = ["--size", size, - "--square", square, - "--camera_name", "oak/rgb", - "--no-service-check"] - stereo_args = ["--size", size, - "--square", square, - "--camera_name", "/rgb", - "--approximate", "0.1", -
"--camera_name", "oak", - "left_camera", "left", - "right_camera", "right", - "--no-service-check"] - mono_remappings = [('/image', '{}/rgb/image_raw'.format(name))] - stereo_remappings = [('/left', '{}/left/image_raw'.format(name)), - ('/right', '{}/right/image_raw'.format(name))] + mono_args = [ + "--size", + size, + "--square", + square, + "--camera_name", + "oak/rgb", + "--no-service-check", + ] + stereo_args = [ + "--size", + size, + "--square", + square, + "--camera_name", + "/rgb", + "--approximate", + "0.1", + "--camera_name", + "oak", + "left_camera", + "left", + "right_camera", + "right", + "--no-service-check", + ] + mono_remappings = [("/image", "{}/rgb/image_raw".format(name))] + stereo_remappings = [ + ("/left", "{}/left/image_raw".format(name)), + ("/right", "{}/right/image_raw".format(name)), + ] args = [] remappings = [] if type == "mono": @@ -44,9 +62,9 @@ def launch_setup(context, *args, **kwargs): return [ IncludeLaunchDescription( PythonLaunchDescriptionSource( - os.path.join(depthai_prefix, 'launch', 'camera.launch.py')), - launch_arguments={"name": name, - "params_file": params_file}.items() + os.path.join(depthai_prefix, "launch", "camera.launch.py") + ), + launch_arguments={"name": name, "params_file": params_file}.items(), ), Node( name="calibrator", @@ -54,8 +72,8 @@ def launch_setup(context, *args, **kwargs): package="camera_calibration", executable="cameracalibrator", arguments=args, - remappings=remappings - ) + remappings=remappings, + ), ] @@ -66,7 +84,10 @@ def generate_launch_description(): DeclareLaunchArgument("size", default_value="8x6"), DeclareLaunchArgument("square", default_value="0.108"), DeclareLaunchArgument("type", default_value="mono"), - DeclareLaunchArgument("params_file", default_value=os.path.join(depthai_prefix, 'config', 'calibration.yaml')), + DeclareLaunchArgument( + "params_file", + default_value=os.path.join(depthai_prefix, "config", "calibration.yaml"), + ), ] return LaunchDescription( diff --git a/depthai_ros_driver/launch/camera.launch.py b/depthai_ros_driver/launch/camera.launch.py index c152ea8d..5f3a8d87 100644 --- a/depthai_ros_driver/launch/camera.launch.py +++ b/depthai_ros_driver/launch/camera.launch.py @@ -2,116 +2,280 @@ from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription -from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, OpaqueFunction +from launch.actions import ( + DeclareLaunchArgument, + IncludeLaunchDescription, + OpaqueFunction, +) +from launch.conditions import IfCondition from launch.launch_description_sources import PythonLaunchDescriptionSource from launch.substitutions import LaunchConfiguration -from launch.conditions import IfCondition -from launch_ros.actions import ComposableNodeContainer, Node +from launch_ros.actions import ComposableNodeContainer, LoadComposableNodes, Node from launch_ros.descriptions import ComposableNode +def is_launch_config_true(context, name): + return LaunchConfiguration(name).perform(context) == "true" + + +def setup_launch_prefix(context, *args, **kwargs): + use_gdb = LaunchConfiguration("use_gdb", default="false") + use_valgrind = LaunchConfiguration("use_valgrind", default="false") + use_perf = LaunchConfiguration("use_perf", default="false") + + launch_prefix = "" + + if use_gdb.perform(context) == "true": + launch_prefix += "xterm -e gdb -ex run --args" + if use_valgrind.perform(context) == "true": + launch_prefix += "valgrind --tool=callgrind" + if use_perf.perform(context) == "true": + 
launch_prefix += ( + "perf record -g --call-graph dwarf --output=perf.out.node_name.data --" + ) + + return launch_prefix + + def launch_setup(context, *args, **kwargs): - log_level = 'info' - if(context.environment.get('DEPTHAI_DEBUG')=='1'): - log_level='debug' + log_level = "info" + if context.environment.get("DEPTHAI_DEBUG") == "1": + log_level = "debug" - + urdf_launch_dir = os.path.join( + get_package_share_directory("depthai_descriptions"), "launch" + ) - urdf_launch_dir = os.path.join(get_package_share_directory('depthai_descriptions'), 'launch') - + parent_frame = LaunchConfiguration( + "parent_frame", default="oak-d-base-frame" + ).perform(context) + cam_pos_x = LaunchConfiguration("cam_pos_x", default="0.0") + cam_pos_y = LaunchConfiguration("cam_pos_y", default="0.0") + cam_pos_z = LaunchConfiguration("cam_pos_z", default="0.0") + cam_roll = LaunchConfiguration("cam_roll", default="0.0") + cam_pitch = LaunchConfiguration("cam_pitch", default="0.0") + cam_yaw = LaunchConfiguration("cam_yaw", default="0.0") + use_composition = LaunchConfiguration("rsp_use_composition", default="true") + imu_from_descr = LaunchConfiguration("imu_from_descr", default="false") + publish_tf_from_calibration = LaunchConfiguration( + "publish_tf_from_calibration", default="false" + ) + override_cam_model = LaunchConfiguration("override_cam_model", default="false") params_file = LaunchConfiguration("params_file") - camera_model = LaunchConfiguration('camera_model', default = 'OAK-D') - - name = LaunchConfiguration('name').perform(context) - - parent_frame = LaunchConfiguration('parent_frame', default = 'oak-d-base-frame') - cam_pos_x = LaunchConfiguration('cam_pos_x', default = '0.0') - cam_pos_y = LaunchConfiguration('cam_pos_y', default = '0.0') - cam_pos_z = LaunchConfiguration('cam_pos_z', default = '0.0') - cam_roll = LaunchConfiguration('cam_roll', default = '0.0') - cam_pitch = LaunchConfiguration('cam_pitch', default = '0.0') - cam_yaw = LaunchConfiguration('cam_yaw', default = '0.0') - use_composition = LaunchConfiguration('rsp_use_composition', default='true') - imu_from_descr = LaunchConfiguration('imu_from_descr', default='false') - publish_tf_from_calibration = LaunchConfiguration('publish_tf_from_calibration', default='false') - override_cam_model = LaunchConfiguration('override_cam_model', default='false') + camera_model = LaunchConfiguration("camera_model", default="OAK-D") + rs_compat = LaunchConfiguration("rs_compat", default="false") + pointcloud_enable = LaunchConfiguration("pointcloud.enable", default="false") + rectify_rgb = LaunchConfiguration("rectify_rgb", default="true") + namespace = LaunchConfiguration("namespace", default="").perform(context) + name = LaunchConfiguration("name").perform(context) + + # If RealSense compatibility is enabled, we need to override some parameters, topics and node names + parameter_overrides = {} + color_sens_name = "rgb" + stereo_sens_name = "stereo" + points_topic_name = f"{name}/points" + if pointcloud_enable.perform(context) == "true": + parameter_overrides = { + "pipeline_gen": {"i_enable_sync": True}, + "rgb": {"i_synced": True}, + "stereo": {"i_synced": True}, + } + depth_topic_suffix = "image_raw" + if rs_compat.perform(context) == "true": + depth_topic_suffix = "image_rect_raw" + depth_profile = LaunchConfiguration("depth_module.depth_profile").perform( + context + ) + color_profile = LaunchConfiguration("rgb_camera.color_profile").perform(context) + infra_profile = LaunchConfiguration("depth_module.infra_profile").perform( + context + ) 
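# Worked example (values illustrative): a profile string such as "1280,720,30" or "1280x720x30" is split below into width=1280 (int), height=720 (int) and fps=30.0 (float), which feed the i_width/i_height/i_fps overrides further down.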
+ # split profile string (0,0,0 or 0x0x0 or 0X0X0) into width (int), height (int) and fps (float) + # find delimiter + delimiter = "," + if "x" in depth_profile: + delimiter = "x" + elif "X" in depth_profile: + delimiter = "X" + depth_profile = depth_profile.split(delimiter) + color_profile = color_profile.split(delimiter) + infra_profile = infra_profile.split(delimiter) + + color_sens_name = "color" + stereo_sens_name = "depth" + if name == "oak": + name = "camera" + points_topic_name = f"{name}/depth/color/points" + if namespace == "": + namespace = "camera" + if parent_frame == "oak-d-base-frame": + parent_frame = f"{name}_link" + parameter_overrides = { + "camera": { + "i_rs_compat": True, + }, + "pipeline_gen": { + "i_enable_sync": True, + }, + "color": { + "i_publish_topic": is_launch_config_true(context, "enable_color"), + "i_synced": True, + "i_width": int(color_profile[0]), + "i_height": int(color_profile[1]), + "i_fps": float(color_profile[2]), + }, + "depth": { + "i_publish_topic": is_launch_config_true(context, "enable_depth"), + "i_synced": True, + "i_width": int(depth_profile[0]), + "i_height": int(depth_profile[1]), + "i_fps": float(depth_profile[2]), + }, + "infra1": { + "i_width": int(infra_profile[0]), + "i_height": int(infra_profile[1]), + "i_fps": float(infra_profile[2]), + }, + "infra2": { + "i_width": int(infra_profile[0]), + "i_height": int(infra_profile[1]), + "i_fps": float(infra_profile[2]), + }, + } + # update (rather than reassign) the "depth" entry so the overrides set above are kept + parameter_overrides["depth"].update({ + "i_publish_left_rect": True, + "i_publish_right_rect": True, + }) tf_params = {} - if(publish_tf_from_calibration.perform(context) == 'true'): - cam_model = '' - if override_cam_model.perform(context) == 'true': + if publish_tf_from_calibration.perform(context) == "true": + cam_model = "" + if override_cam_model.perform(context) == "true": cam_model = camera_model.perform(context) - tf_params = {'camera': { - 'i_publish_tf_from_calibration': True, - 'i_tf_tf_prefix': name, - 'i_tf_camera_model': cam_model, - 'i_tf_base_frame': name, - 'i_tf_parent_frame': parent_frame.perform(context), - 'i_tf_cam_pos_x': cam_pos_x.perform(context), - 'i_tf_cam_pos_y': cam_pos_y.perform(context), - 'i_tf_cam_pos_z': cam_pos_z.perform(context), - 'i_tf_cam_roll': cam_roll.perform(context), - 'i_tf_cam_pitch': cam_pitch.perform(context), - 'i_tf_cam_yaw': cam_yaw.perform(context), - 'i_tf_imu_from_descr': imu_from_descr.perform(context), - } + tf_params = { + "camera": { + "i_publish_tf_from_calibration": True, + "i_tf_tf_prefix": name, + "i_tf_camera_model": cam_model, + "i_tf_base_frame": name, + "i_tf_parent_frame": parent_frame, + "i_tf_cam_pos_x": cam_pos_x.perform(context), + "i_tf_cam_pos_y": cam_pos_y.perform(context), + "i_tf_cam_pos_z": cam_pos_z.perform(context), + "i_tf_cam_roll": cam_roll.perform(context), + "i_tf_cam_pitch": cam_pitch.perform(context), + "i_tf_cam_yaw": cam_yaw.perform(context), + "i_tf_imu_from_descr": imu_from_descr.perform(context), + } } - - use_gdb = LaunchConfiguration('use_gdb', default = 'false') - use_valgrind = LaunchConfiguration('use_valgrind', default = 'false') - use_perf = LaunchConfiguration('use_perf', default = 'false') - launch_prefix = '' + launch_prefix = setup_launch_prefix(context) - if (use_gdb.perform(context) == 'true'): - launch_prefix += "gdb -ex run --args " - if (use_valgrind.perform(context) == 'true'): - launch_prefix += "valgrind --tool=callgrind" - if (use_perf.perform(context) == 'true'): - launch_prefix += "perf record -g --call-graph dwarf --output=perf.out.node_name.data --" return [ -
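# Sketch of what the returned list below assembles: an optional RViz instance, the URDF description include, the composable-node container running the driver, and the optional rectify/point-cloud components loaded into that same container.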
Node( - condition=IfCondition(LaunchConfiguration("use_rviz").perform(context)), - package="rviz2", - executable="rviz2", - name="rviz2", - output="log", - arguments=["-d", LaunchConfiguration("rviz_config")], - ), + Node( + condition=IfCondition(LaunchConfiguration("use_rviz").perform(context)), + package="rviz2", + executable="rviz2", + name="rviz2", + output="log", + arguments=["-d", LaunchConfiguration("rviz_config")], + ), IncludeLaunchDescription( PythonLaunchDescriptionSource( - os.path.join(urdf_launch_dir, 'urdf_launch.py')), - launch_arguments={'tf_prefix': name, - 'camera_model': camera_model, - 'base_frame': name, - 'parent_frame': parent_frame, - 'cam_pos_x': cam_pos_x, - 'cam_pos_y': cam_pos_y, - 'cam_pos_z': cam_pos_z, - 'cam_roll': cam_roll, - 'cam_pitch': cam_pitch, - 'cam_yaw': cam_yaw, - 'use_composition': use_composition, - 'use_base_descr': publish_tf_from_calibration}.items()), - + os.path.join(urdf_launch_dir, "urdf_launch.py") + ), + launch_arguments={ + "namespace": namespace, + "tf_prefix": name, + "camera_model": camera_model, + "base_frame": name, + "parent_frame": parent_frame, + "cam_pos_x": cam_pos_x, + "cam_pos_y": cam_pos_y, + "cam_pos_z": cam_pos_z, + "cam_roll": cam_roll, + "cam_pitch": cam_pitch, + "cam_yaw": cam_yaw, + "use_composition": use_composition, + "use_base_descr": publish_tf_from_calibration, + "rs_compat": rs_compat, + }.items(), + ), ComposableNodeContainer( - name=name+"_container", - namespace="", + name=f"{name}_container", + namespace=namespace, package="rclcpp_components", executable="component_container", composable_node_descriptions=[ - ComposableNode( - package="depthai_ros_driver", - plugin="depthai_ros_driver::Camera", - name=name, - parameters=[params_file, tf_params], - ) + ComposableNode( + package="depthai_ros_driver", + plugin="depthai_ros_driver::Camera", + name=name, + namespace=namespace, + parameters=[ + params_file, + tf_params, + parameter_overrides, + ], + ) ], - arguments=['--ros-args', '--log-level', log_level], + arguments=["--ros-args", "--log-level", log_level], prefix=[launch_prefix], output="both", ), - + LoadComposableNodes( + condition=IfCondition(rectify_rgb), + target_container=f"{namespace}/{name}_container", + composable_node_descriptions=[ + ComposableNode( + package="image_proc", + plugin="image_proc::RectifyNode", + name="rectify_color_node", + namespace=namespace, + remappings=[ + ("image", f"{name}/{color_sens_name}/image_raw"), + ("camera_info", f"{name}/{color_sens_name}/camera_info"), + ("image_rect", f"{name}/{color_sens_name}/image_rect"), + ( + "image_rect/compressed", + f"{name}/{color_sens_name}/image_rect/compressed", + ), + ( + "image_rect/compressedDepth", + f"{name}/{color_sens_name}/image_rect/compressedDepth", + ), + ( + "image_rect/theora", + f"{name}/{color_sens_name}/image_rect/theora", + ), + ], + ) + ], + ), + LoadComposableNodes( + condition=IfCondition(pointcloud_enable), + target_container=f"{namespace}/{name}_container", + composable_node_descriptions=[ + ComposableNode( + package="depth_image_proc", + plugin="depth_image_proc::PointCloudXyzrgbNode", + name="point_cloud_xyzrgb_node", + namespace=namespace, + remappings=[ + ( + "depth_registered/image_rect", + f"{name}/{stereo_sens_name}/{depth_topic_suffix}", + ), + ( + "rgb/image_rect_color", + f"{name}/{color_sens_name}/image_rect", + ), + ("rgb/camera_info", f"{name}/{color_sens_name}/camera_info"), + ("points", points_topic_name), + ], + ), + ], + ), ] @@ -120,29 +284,59 @@ def generate_launch_description(): 
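# Hypothetical invocation exercising the new arguments declared below (values are illustrative, matching the declared defaults):
#   ros2 launch depthai_ros_driver camera.launch.py rs_compat:=true pointcloud.enable:=true rgb_camera.color_profile:=1280,720,30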
declared_arguments = [ DeclareLaunchArgument("name", default_value="oak"), + DeclareLaunchArgument("namespace", default_value=""), DeclareLaunchArgument("parent_frame", default_value="oak-d-base-frame"), - DeclareLaunchArgument("camera_model", default_value="OAK-D"), + DeclareLaunchArgument("camera_model", default_value="OAK-D-PRO"), DeclareLaunchArgument("cam_pos_x", default_value="0.0"), DeclareLaunchArgument("cam_pos_y", default_value="0.0"), DeclareLaunchArgument("cam_pos_z", default_value="0.0"), DeclareLaunchArgument("cam_roll", default_value="0.0"), DeclareLaunchArgument("cam_pitch", default_value="0.0"), DeclareLaunchArgument("cam_yaw", default_value="0.0"), - DeclareLaunchArgument("params_file", default_value=os.path.join(depthai_prefix, 'config', 'camera.yaml')), - DeclareLaunchArgument("use_rviz", default_value='false'), - DeclareLaunchArgument("rviz_config", default_value=os.path.join(depthai_prefix, "config", "rviz", "rgbd.rviz")), - DeclareLaunchArgument("rsp_use_composition", default_value='true'), - DeclareLaunchArgument("publish_tf_from_calibration", default_value='false', description='Enables TF publishing from camera calibration file.'), - DeclareLaunchArgument("imu_from_descr", default_value='false', description='Enables IMU publishing from URDF.'), - DeclareLaunchArgument("override_cam_model", default_value='false', description='Overrides camera model from calibration file.'), - DeclareLaunchArgument("use_gdb", default_value='false'), - DeclareLaunchArgument("use_valgrind", default_value='false'), - DeclareLaunchArgument("use_perf", default_value='false') + DeclareLaunchArgument( + "params_file", + default_value=os.path.join(depthai_prefix, "config", "camera.yaml"), + ), + DeclareLaunchArgument("use_rviz", default_value="false"), + DeclareLaunchArgument( + "rviz_config", + default_value=os.path.join(depthai_prefix, "config", "rviz", "rgbd.rviz"), + ), + DeclareLaunchArgument("rsp_use_composition", default_value="true"), + DeclareLaunchArgument( + "publish_tf_from_calibration", + default_value="false", + description="Enables TF publishing from camera calibration file.", + ), + DeclareLaunchArgument( + "imu_from_descr", + default_value="false", + description="Enables IMU publishing from URDF.", + ), + DeclareLaunchArgument( + "override_cam_model", + default_value="false", + description="Overrides camera model from calibration file.", + ), + DeclareLaunchArgument("use_gdb", default_value="false"), + DeclareLaunchArgument("use_valgrind", default_value="false"), + DeclareLaunchArgument("use_perf", default_value="false"), + DeclareLaunchArgument( + "rs_compat", + default_value="false", + description="Enables compatibility with RealSense nodes.", + ), + DeclareLaunchArgument("rectify_rgb", default_value="true"), + DeclareLaunchArgument("pointcloud.enable", default_value="false"), + DeclareLaunchArgument("enable_color", default_value="true"), + DeclareLaunchArgument("enable_depth", default_value="true"), + DeclareLaunchArgument("enable_infra1", default_value="false"), + DeclareLaunchArgument("enable_infra2", default_value="false"), + DeclareLaunchArgument("depth_module.depth_profile", default_value="1280,720,30"), + DeclareLaunchArgument("rgb_camera.color_profile", default_value="1280,720,30"), + DeclareLaunchArgument("depth_module.infra_profile", default_value="1280,720,30"), ] return LaunchDescription( declared_arguments + [OpaqueFunction(function=launch_setup)] ) - - - diff --git a/depthai_ros_driver/launch/camera_as_part_of_a_robot.launch.py 
b/depthai_ros_driver/launch/camera_as_part_of_a_robot.launch.py new file mode 100644 index 00000000..e28794a6 --- /dev/null +++ b/depthai_ros_driver/launch/camera_as_part_of_a_robot.launch.py @@ -0,0 +1,161 @@ +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +from launch.actions import ( + DeclareLaunchArgument, + IncludeLaunchDescription, + OpaqueFunction, +) +from launch.conditions import IfCondition +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch.substitutions import LaunchConfiguration +from launch_ros.actions import ComposableNodeContainer, LoadComposableNodes, Node +from launch_ros.descriptions import ComposableNode + + +def launch_setup(context, *args, **kwargs): + log_level = "info" + if context.environment.get("DEPTHAI_DEBUG") == "1": + log_level = "debug" + + params_file = LaunchConfiguration("params_file") + depthai_prefix = get_package_share_directory("depthai_ros_driver") + + name = LaunchConfiguration("name").perform(context) + rgb_topic_name = name + "/rgb/image_raw" + if LaunchConfiguration("rectify_rgb").perform(context) == "true": + rgb_topic_name = name + "/rgb/image_rect" + + parent_frame = LaunchConfiguration("parent_frame", default="oak-d-base-frame") + cam_pos_x = LaunchConfiguration("cam_pos_x", default="0.0") + cam_pos_y = LaunchConfiguration("cam_pos_y", default="0.0") + cam_pos_z = LaunchConfiguration("cam_pos_z", default="0.0") + cam_roll = LaunchConfiguration("cam_roll", default="0.0") + cam_pitch = LaunchConfiguration("cam_pitch", default="0.0") + cam_yaw = LaunchConfiguration("cam_yaw", default="0.0") + use_composition = LaunchConfiguration("rsp_use_composition", default="true") + imu_from_descr = LaunchConfiguration("imu_from_descr", default="false") + publish_tf_from_calibration = LaunchConfiguration( + "publish_tf_from_calibration", default="false" + ) + override_cam_model = LaunchConfiguration("override_cam_model", default="false") + camera_model = LaunchConfiguration("camera_model", default="OAK-D") + + tf_params = {} + if publish_tf_from_calibration.perform(context) == "true": + cam_model = "" + if override_cam_model.perform(context) == "true": + cam_model = camera_model.perform(context) + tf_params = { + "camera": { + "i_publish_tf_from_calibration": True, + "i_tf_tf_prefix": name, + "i_tf_camera_model": cam_model, + "i_tf_base_frame": name, + "i_tf_parent_frame": parent_frame.perform(context), + "i_tf_cam_pos_x": cam_pos_x.perform(context), + "i_tf_cam_pos_y": cam_pos_y.perform(context), + "i_tf_cam_pos_z": cam_pos_z.perform(context), + "i_tf_cam_roll": cam_roll.perform(context), + "i_tf_cam_pitch": cam_pitch.perform(context), + "i_tf_cam_yaw": cam_yaw.perform(context), + "i_tf_imu_from_descr": imu_from_descr.perform(context), + } + } + + return [ + ComposableNodeContainer( + name=name + "_container", + namespace="", + package="rclcpp_components", + executable="component_container", + composable_node_descriptions=[ + ComposableNode( + package="depthai_ros_driver", + plugin="depthai_ros_driver::Camera", + name=name, + parameters=[params_file, tf_params], + ) + ], + arguments=["--ros-args", "--log-level", log_level], + output="both", + ), + LoadComposableNodes( + condition=IfCondition(LaunchConfiguration("rectify_rgb")), + target_container=name + "_container", + composable_node_descriptions=[ + ComposableNode( + package="image_proc", + plugin="image_proc::RectifyNode", + name=name + "_rectify_color_node", + remappings=[ + ("image", name + "/rgb/image_raw"), + ("camera_info", name + "/rgb/camera_info"),
("image_rect", name + "/rgb/image_rect"), + ("image_rect/compressed", name + "/rgb/image_rect/compressed"), + ( + "image_rect/compressedDepth", + name + "/rgb/image_rect/compressedDepth", + ), + ("image_rect/theora", name + "/rgb/image_rect/theora"), + ], + ) + ], + ), + LoadComposableNodes( + target_container=name + "_container", + composable_node_descriptions=[ + ComposableNode( + package="depth_image_proc", + plugin="depth_image_proc::PointCloudXyzrgbNode", + name=name + "_point_cloud_xyzrgb_node", + remappings=[ + ("depth_registered/image_rect", name + "/stereo/image_raw"), + ("rgb/image_rect_color", rgb_topic_name), + ("rgb/camera_info", name + "/rgb/camera_info"), + ("points", name + "/points"), + ], + ), + ], + ), + ] + + +def generate_launch_description(): + depthai_prefix = get_package_share_directory("depthai_ros_driver") + declared_arguments = [ + DeclareLaunchArgument("name", default_value="oak"), + DeclareLaunchArgument("camera_model", default_value="OAK-D"), + DeclareLaunchArgument("parent_frame", default_value="oak-d-base-frame"), + DeclareLaunchArgument("cam_pos_x", default_value="0.0"), + DeclareLaunchArgument("cam_pos_y", default_value="0.0"), + DeclareLaunchArgument("cam_pos_z", default_value="0.0"), + DeclareLaunchArgument("cam_roll", default_value="0.0"), + DeclareLaunchArgument("cam_pitch", default_value="0.0"), + DeclareLaunchArgument("cam_yaw", default_value="0.0"), + DeclareLaunchArgument( + "params_file", + default_value=os.path.join(depthai_prefix, "config", "rgbd.yaml"), + ), + DeclareLaunchArgument("rectify_rgb", default_value="False"), + DeclareLaunchArgument("rsp_use_composition", default_value="true"), + DeclareLaunchArgument( + "publish_tf_from_calibration", + default_value="false", + description="Enables TF publishing from camera calibration file.", + ), + DeclareLaunchArgument( + "imu_from_descr", + default_value="false", + description="Enables IMU publishing from URDF.", + ), + DeclareLaunchArgument( + "override_cam_model", + default_value="false", + description="Overrides camera model from calibration file.", + ), + ] + + return LaunchDescription( + declared_arguments + [OpaqueFunction(function=launch_setup)] + ) diff --git a/depthai_ros_driver/launch/example_multicam.launch.py b/depthai_ros_driver/launch/example_multicam.launch.py index f9258642..73615bb6 100644 --- a/depthai_ros_driver/launch/example_multicam.launch.py +++ b/depthai_ros_driver/launch/example_multicam.launch.py @@ -3,47 +3,54 @@ from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription from launch.actions import ( - IncludeLaunchDescription, DeclareLaunchArgument, + IncludeLaunchDescription, OpaqueFunction, ) from launch.launch_description_sources import PythonLaunchDescriptionSource from launch_ros.actions import Node - def launch_setup(context, *args, **kwargs): depthai_prefix = get_package_share_directory("depthai_ros_driver") params_file = os.path.join(depthai_prefix, "config", "multicam_example.yaml") cams = ["oak_d_w", "oak_d_lite"] nodes = [] - i=0.0 + i = 0.0 for cam_name in cams: node = IncludeLaunchDescription( PythonLaunchDescriptionSource( - os.path.join(depthai_prefix, 'launch', 'camera.launch.py')), - launch_arguments={"name": cam_name, - "parent_frame": "map", - "params_file": params_file, - "cam_pos_y": str(i)}.items()) + os.path.join(depthai_prefix, "launch", "camera.launch.py") + ), + launch_arguments={ + "name": cam_name, + "parent_frame": "map", + "params_file": params_file, + "cam_pos_y": str(i), + }.items(), + ) 
nodes.append(node) - i=i+0.1 + i = i + 0.1 spatial_rgbd = IncludeLaunchDescription( PythonLaunchDescriptionSource( - os.path.join(depthai_prefix, 'launch', - 'rgbd_pcl.launch.py') + os.path.join(depthai_prefix, "launch", "rgbd_pcl.launch.py") ), - launch_arguments={"name": "oak_d_pro", - "parent_frame": "map", - "params_file": params_file, - "cam_pos_y": str(-0.1)}.items()) + launch_arguments={ + "name": "oak_d_pro", + "parent_frame": "map", + "params_file": params_file, + "cam_pos_y": str(-0.1), + }.items(), + ) obj_det = Node( package="depthai_ros_driver", executable="obj_pub.py", - remappings=[("/oak/nn/detections", "/oak_d_pro/nn/detections"), - ("/oak/nn/detection_markers", "/oak_d_pro/nn/detection_markers")] + remappings=[ + ("/oak/nn/detections", "/oak_d_pro/nn/detections"), + ("/oak/nn/detection_markers", "/oak_d_pro/nn/detection_markers"), + ], ) nodes.append(spatial_rgbd) diff --git a/depthai_ros_driver/launch/example_segmentation.launch.py b/depthai_ros_driver/launch/example_segmentation.launch.py index 00b3c869..d97fb3cb 100644 --- a/depthai_ros_driver/launch/example_segmentation.launch.py +++ b/depthai_ros_driver/launch/example_segmentation.launch.py @@ -2,13 +2,17 @@ from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription -from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, OpaqueFunction +from launch.actions import ( + DeclareLaunchArgument, + IncludeLaunchDescription, + OpaqueFunction, +) from launch.launch_description_sources import PythonLaunchDescriptionSource from launch.substitutions import LaunchConfiguration def launch_setup(context, *args, **kwargs): - name = LaunchConfiguration('name').perform(context) + name = LaunchConfiguration("name").perform(context) depthai_prefix = get_package_share_directory("depthai_ros_driver") rviz_config = os.path.join(depthai_prefix, "config", "rviz", "segmentation.rviz") @@ -17,11 +21,15 @@ def launch_setup(context, *args, **kwargs): return [ IncludeLaunchDescription( PythonLaunchDescriptionSource( - os.path.join(depthai_prefix, 'launch', 'camera.launch.py')), - launch_arguments={"name": name, - "params_file": params_file, - "use_rviz": LaunchConfiguration("use_rviz"), - "rviz_config": rviz_config}.items()) + os.path.join(depthai_prefix, "launch", "camera.launch.py") + ), + launch_arguments={ + "name": name, + "params_file": params_file, + "use_rviz": LaunchConfiguration("use_rviz"), + "rviz_config": rviz_config, + }.items(), + ) ] @@ -29,7 +37,10 @@ def generate_launch_description(): depthai_prefix = get_package_share_directory("depthai_ros_driver") declared_arguments = [ DeclareLaunchArgument("name", default_value="oak"), - DeclareLaunchArgument("params_file", default_value=os.path.join(depthai_prefix, 'config', 'segmentation.yaml')), + DeclareLaunchArgument( + "params_file", + default_value=os.path.join(depthai_prefix, "config", "segmentation.yaml"), + ), DeclareLaunchArgument("use_rviz", default_value="False"), ] diff --git a/depthai_ros_driver/launch/pointcloud.launch.py b/depthai_ros_driver/launch/pointcloud.launch.py index b1e14448..26e138fb 100644 --- a/depthai_ros_driver/launch/pointcloud.launch.py +++ b/depthai_ros_driver/launch/pointcloud.launch.py @@ -2,7 +2,11 @@ from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription -from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, OpaqueFunction +from launch.actions import ( + DeclareLaunchArgument, + IncludeLaunchDescription, + 
OpaqueFunction, +) from launch.launch_description_sources import PythonLaunchDescriptionSource from launch.substitutions import LaunchConfiguration from launch_ros.actions import LoadComposableNodes @@ -13,37 +17,40 @@ def launch_setup(context, *args, **kwargs): params_file = LaunchConfiguration("params_file") depthai_prefix = get_package_share_directory("depthai_ros_driver") - name = LaunchConfiguration('name').perform(context) - - return [ + name = LaunchConfiguration("name").perform(context) + return [ IncludeLaunchDescription( PythonLaunchDescriptionSource( - os.path.join(depthai_prefix, 'launch', 'camera.launch.py')), - launch_arguments={"name": name, - "params_file": params_file, - "parent_frame": LaunchConfiguration("parent_frame"), - "cam_pos_x": LaunchConfiguration("cam_pos_x"), - "cam_pos_y": LaunchConfiguration("cam_pos_y"), - "cam_pos_z": LaunchConfiguration("cam_pos_z"), - "cam_roll": LaunchConfiguration("cam_roll"), - "cam_pitch": LaunchConfiguration("cam_pitch"), - "cam_yaw": LaunchConfiguration("cam_yaw"), - "use_rviz": LaunchConfiguration("use_rviz") - }.items()), - + os.path.join(depthai_prefix, "launch", "camera.launch.py") + ), + launch_arguments={ + "name": name, + "params_file": params_file, + "parent_frame": LaunchConfiguration("parent_frame"), + "cam_pos_x": LaunchConfiguration("cam_pos_x"), + "cam_pos_y": LaunchConfiguration("cam_pos_y"), + "cam_pos_z": LaunchConfiguration("cam_pos_z"), + "cam_roll": LaunchConfiguration("cam_roll"), + "cam_pitch": LaunchConfiguration("cam_pitch"), + "cam_yaw": LaunchConfiguration("cam_yaw"), + "use_rviz": LaunchConfiguration("use_rviz"), + }.items(), + ), LoadComposableNodes( - target_container=name+"_container", + target_container=name + "_container", composable_node_descriptions=[ - ComposableNode( - package='depth_image_proc', - plugin='depth_image_proc::PointCloudXyziNode', - name='point_cloud_xyzi', - remappings=[('depth/image_rect', name+'/stereo/image_raw'), - ('intensity/image_rect', name+'/right/image_rect'), - ('intensity/camera_info', name+'/stereo/camera_info'), - ('points', name+'/points') - ]), + ComposableNode( + package="depth_image_proc", + plugin="depth_image_proc::PointCloudXyziNode", + name="point_cloud_xyzi", + remappings=[ + ("depth/image_rect", name + "/stereo/image_raw"), + ("intensity/image_rect", name + "/right/image_rect"), + ("intensity/camera_info", name + "/stereo/camera_info"), + ("points", name + "/points"), + ], + ), ], ), ] @@ -60,7 +67,10 @@ def generate_launch_description(): DeclareLaunchArgument("cam_roll", default_value="0.0"), DeclareLaunchArgument("cam_pitch", default_value="0.0"), DeclareLaunchArgument("cam_yaw", default_value="0.0"), - DeclareLaunchArgument("params_file", default_value=os.path.join(depthai_prefix, 'config', 'pcl.yaml')), + DeclareLaunchArgument( + "params_file", + default_value=os.path.join(depthai_prefix, "config", "pcl.yaml"), + ), DeclareLaunchArgument("use_rviz", default_value="False"), ] diff --git a/depthai_ros_driver/launch/rgbd_pcl.launch.py b/depthai_ros_driver/launch/rgbd_pcl.launch.py index 6d8b17c6..ceea2467 100644 --- a/depthai_ros_driver/launch/rgbd_pcl.launch.py +++ b/depthai_ros_driver/launch/rgbd_pcl.launch.py @@ -2,67 +2,39 @@ from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription -from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, OpaqueFunction +from launch.actions import ( + DeclareLaunchArgument, + IncludeLaunchDescription, + OpaqueFunction, +) from 
launch.launch_description_sources import PythonLaunchDescriptionSource from launch.substitutions import LaunchConfiguration -from launch_ros.actions import LoadComposableNodes, Node -from launch_ros.descriptions import ComposableNode -from launch.conditions import IfCondition def launch_setup(context, *args, **kwargs): params_file = LaunchConfiguration("params_file") depthai_prefix = get_package_share_directory("depthai_ros_driver") - name = LaunchConfiguration('name').perform(context) - rgb_topic_name = name+'/rgb/image_raw' - if LaunchConfiguration('rectify_rgb').perform(context)=='true': - rgb_topic_name = name +'/rgb/image_rect' + name = LaunchConfiguration("name").perform(context) return [ IncludeLaunchDescription( PythonLaunchDescriptionSource( - os.path.join(depthai_prefix, 'launch', 'camera.launch.py')), - launch_arguments={"name": name, - "params_file": params_file, - "parent_frame": LaunchConfiguration("parent_frame"), - "cam_pos_x": LaunchConfiguration("cam_pos_x"), - "cam_pos_y": LaunchConfiguration("cam_pos_y"), - "cam_pos_z": LaunchConfiguration("cam_pos_z"), - "cam_roll": LaunchConfiguration("cam_roll"), - "cam_pitch": LaunchConfiguration("cam_pitch"), - "cam_yaw": LaunchConfiguration("cam_yaw"), - "use_rviz": LaunchConfiguration("use_rviz") - }.items()), - - LoadComposableNodes( - condition=IfCondition(LaunchConfiguration("rectify_rgb")), - target_container=name+"_container", - composable_node_descriptions=[ - ComposableNode( - package="image_proc", - plugin="image_proc::RectifyNode", - name="rectify_color_node", - remappings=[('image', name+'/rgb/image_raw'), - ('camera_info', name+'/rgb/camera_info'), - ('image_rect', name+'/rgb/image_rect'), - ('image_rect/compressed', name+'/rgb/image_rect/compressed'), - ('image_rect/compressedDepth', name+'/rgb/image_rect/compressedDepth'), - ('image_rect/theora', name+'/rgb/image_rect/theora')] - ) - ]), - LoadComposableNodes( - target_container=name+"_container", - composable_node_descriptions=[ - ComposableNode( - package='depth_image_proc', - plugin='depth_image_proc::PointCloudXyzrgbNode', - name='point_cloud_xyzrgb_node', - remappings=[('depth_registered/image_rect', name+'/stereo/image_raw'), - ('rgb/image_rect_color', rgb_topic_name), - ('rgb/camera_info', name+'/rgb/camera_info'), - ('points', name+'/points')] - ), - ], + os.path.join(depthai_prefix, "launch", "camera.launch.py") + ), + launch_arguments={ + "name": name, + "params_file": params_file, + "parent_frame": LaunchConfiguration("parent_frame"), + "cam_pos_x": LaunchConfiguration("cam_pos_x"), + "cam_pos_y": LaunchConfiguration("cam_pos_y"), + "cam_pos_z": LaunchConfiguration("cam_pos_z"), + "cam_roll": LaunchConfiguration("cam_roll"), + "cam_pitch": LaunchConfiguration("cam_pitch"), + "cam_yaw": LaunchConfiguration("cam_yaw"), + "use_rviz": LaunchConfiguration("use_rviz"), + "pointcloud.enable": "true", + "rs_compat": LaunchConfiguration("rs_compat"), + }.items(), ), ] @@ -79,9 +51,13 @@ def generate_launch_description(): DeclareLaunchArgument("cam_roll", default_value="0.0"), DeclareLaunchArgument("cam_pitch", default_value="0.0"), DeclareLaunchArgument("cam_yaw", default_value="0.0"), - DeclareLaunchArgument("params_file", default_value=os.path.join(depthai_prefix, 'config', 'rgbd.yaml')), + DeclareLaunchArgument( + "params_file", + default_value=os.path.join(depthai_prefix, "config", "rgbd.yaml"), + ), DeclareLaunchArgument("use_rviz", default_value="False"), DeclareLaunchArgument("rectify_rgb", default_value="False"), + DeclareLaunchArgument("rs_compat", 
default_value="False"), ] return LaunchDescription( diff --git a/depthai_ros_driver/launch/rtabmap.launch.py b/depthai_ros_driver/launch/rtabmap.launch.py index bd36584a..7e6e2f6f 100644 --- a/depthai_ros_driver/launch/rtabmap.launch.py +++ b/depthai_ros_driver/launch/rtabmap.launch.py @@ -2,18 +2,23 @@ from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription -from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, OpaqueFunction +from launch.actions import ( + DeclareLaunchArgument, + IncludeLaunchDescription, + OpaqueFunction, +) from launch.conditions import IfCondition from launch.launch_description_sources import PythonLaunchDescriptionSource from launch.substitutions import LaunchConfiguration from launch_ros.actions import LoadComposableNodes, Node from launch_ros.descriptions import ComposableNode + def launch_setup(context, *args, **kwargs): - name = LaunchConfiguration('name').perform(context) + name = LaunchConfiguration("name").perform(context) depthai_prefix = get_package_share_directory("depthai_ros_driver") - params_file= LaunchConfiguration("params_file") + params_file = LaunchConfiguration("params_file") parameters = [ { "frame_id": name, @@ -26,61 +31,42 @@ def launch_setup(context, *args, **kwargs): ] remappings = [ - ("rgb/image", name+"/rgb/image_rect"), - ("rgb/camera_info", name+"/rgb/camera_info"), - ("depth/image", name+"/stereo/image_raw"), + ("rgb/image", name + "/rgb/image_rect"), + ("rgb/camera_info", name + "/rgb/camera_info"), + ("depth/image", name + "/stereo/image_raw"), ] return [ IncludeLaunchDescription( PythonLaunchDescriptionSource( - os.path.join(depthai_prefix, 'launch', 'camera.launch.py')), - launch_arguments={"name": name, - "params_file": params_file}.items()), - - LoadComposableNodes( - condition=IfCondition(LaunchConfiguration("rectify_rgb")), - target_container=name+"_container", - composable_node_descriptions=[ - ComposableNode( - package="image_proc", - plugin="image_proc::RectifyNode", - name="rectify_color_node", - remappings=[('image', name+'/rgb/image_raw'), - ('camera_info', name+'/rgb/camera_info'), - ('image_rect', name+'/rgb/image_rect'), - ('image_rect/compressed', name+'/rgb/image_rect/compressed'), - ('image_rect/compressedDepth', name+'/rgb/image_rect/compressedDepth'), - ('image_rect/theora', name+'/rgb/image_rect/theora')] - ) - ]), - + os.path.join(depthai_prefix, "launch", "camera.launch.py") + ), + launch_arguments={"name": name, "params_file": params_file}.items(), + ), LoadComposableNodes( - target_container=name+"_container", + target_container=name + "_container", composable_node_descriptions=[ ComposableNode( - package='rtabmap_odom', - plugin='rtabmap_odom::RGBDOdometry', - name='rgbd_odometry', + package="rtabmap_odom", + plugin="rtabmap_odom::RGBDOdometry", + name="rgbd_odometry", parameters=parameters, remappings=remappings, ), ], ), - LoadComposableNodes( - target_container=name+"_container", + target_container=name + "_container", composable_node_descriptions=[ ComposableNode( - package='rtabmap_slam', - plugin='rtabmap_slam::CoreWrapper', - name='rtabmap', + package="rtabmap_slam", + plugin="rtabmap_slam::CoreWrapper", + name="rtabmap", parameters=parameters, remappings=remappings, ), ], ), - Node( package="rtabmap_viz", executable="rtabmap_viz", @@ -95,8 +81,10 @@ def generate_launch_description(): depthai_prefix = get_package_share_directory("depthai_ros_driver") declared_arguments = [ DeclareLaunchArgument("name", default_value="oak"), - 
DeclareLaunchArgument("params_file", default_value=os.path.join(depthai_prefix, 'config', 'rgbd.yaml')), - DeclareLaunchArgument("rectify_rgb", default_value="True"), + DeclareLaunchArgument( + "params_file", + default_value=os.path.join(depthai_prefix, "config", "rgbd.yaml"), + ), ] return LaunchDescription( diff --git a/depthai_ros_driver/launch/sr_poe_rgbd_pcl.launch.py b/depthai_ros_driver/launch/sr_poe_rgbd_pcl.launch.py new file mode 100644 index 00000000..c2b107d6 --- /dev/null +++ b/depthai_ros_driver/launch/sr_poe_rgbd_pcl.launch.py @@ -0,0 +1,86 @@ + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, OpaqueFunction +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch.substitutions import LaunchConfiguration +from launch_ros.actions import LoadComposableNodes, Node +from launch_ros.descriptions import ComposableNode +from launch.conditions import IfCondition + + +def launch_setup(context, *args, **kwargs): + params_file = LaunchConfiguration("params_file") + depthai_prefix = get_package_share_directory("depthai_ros_driver") + + name = LaunchConfiguration('name').perform(context) + return [ + IncludeLaunchDescription( + PythonLaunchDescriptionSource( + os.path.join(depthai_prefix, 'launch', 'camera.launch.py')), + launch_arguments={"name": name, + "params_file": params_file, + "parent_frame": LaunchConfiguration("parent_frame"), + "cam_pos_x": LaunchConfiguration("cam_pos_x"), + "cam_pos_y": LaunchConfiguration("cam_pos_y"), + "cam_pos_z": LaunchConfiguration("cam_pos_z"), + "cam_roll": LaunchConfiguration("cam_roll"), + "cam_pitch": LaunchConfiguration("cam_pitch"), + "cam_yaw": LaunchConfiguration("cam_yaw"), + "use_rviz": LaunchConfiguration("use_rviz"), + "rectify_rgb": "false" + }.items()), + + LoadComposableNodes( + target_container=name+"_container", + composable_node_descriptions=[ + ComposableNode( + package="image_proc", + plugin="image_proc::RectifyNode", + name="rectify_color_node_right", + remappings=[('image', name+'/right/image_raw'), + ('camera_info', name+'/right/camera_info'), + ('image_rect', name+'/right/image_rect'), + ('image_rect/compressed', name+'/right/image_rect/compressed'), + ('image_rect/compressedDepth', name+'/right/image_rect/compressedDepth'), + ('image_rect/theora', name+'/right/image_rect/theora')] + ) + ]), + LoadComposableNodes( + target_container=name+"_container", + composable_node_descriptions=[ + ComposableNode( + package='depth_image_proc', + plugin='depth_image_proc::PointCloudXyzrgbNode', + name='point_cloud_xyzright_node', + remappings=[('depth_registered/image_rect', name+'/tof/image_raw'), + ('rgb/image_rect_color', name+'/right/image_rect'), + ('rgb/camera_info', name+'/right/camera_info'), + ('points', name+'/points')] + ), + ], + ), + ] + + +def generate_launch_description(): + depthai_prefix = get_package_share_directory("depthai_ros_driver") + declared_arguments = [ + DeclareLaunchArgument("name", default_value="oak"), + DeclareLaunchArgument("camera_model", default_value="OAK-D-SR-POE"), + DeclareLaunchArgument("parent_frame", default_value="oak-d-base-frame"), + DeclareLaunchArgument("cam_pos_x", default_value="0.0"), + DeclareLaunchArgument("cam_pos_y", default_value="0.0"), + DeclareLaunchArgument("cam_pos_z", default_value="0.0"), + DeclareLaunchArgument("cam_roll", default_value="0.0"), + DeclareLaunchArgument("cam_pitch", default_value="0.0"), 
+ DeclareLaunchArgument("cam_yaw", default_value="0.0"), + DeclareLaunchArgument("params_file", default_value=os.path.join(depthai_prefix, 'config', 'sr_poe_rgbd.yaml')), + DeclareLaunchArgument("use_rviz", default_value="False"), + ] + + return LaunchDescription( + declared_arguments + [OpaqueFunction(function=launch_setup)] + ) diff --git a/depthai_ros_driver/launch/sr_rgbd_pcl.launch.py b/depthai_ros_driver/launch/sr_rgbd_pcl.launch.py index 32c0035a..15110b63 100644 --- a/depthai_ros_driver/launch/sr_rgbd_pcl.launch.py +++ b/depthai_ros_driver/launch/sr_rgbd_pcl.launch.py @@ -2,66 +2,83 @@ from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription -from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, OpaqueFunction +from launch.actions import ( + DeclareLaunchArgument, + IncludeLaunchDescription, + OpaqueFunction, +) +from launch.conditions import IfCondition from launch.launch_description_sources import PythonLaunchDescriptionSource from launch.substitutions import LaunchConfiguration from launch_ros.actions import LoadComposableNodes, Node from launch_ros.descriptions import ComposableNode -from launch.conditions import IfCondition def launch_setup(context, *args, **kwargs): params_file = LaunchConfiguration("params_file") depthai_prefix = get_package_share_directory("depthai_ros_driver") - name = LaunchConfiguration('name').perform(context) - rgb_topic_name = name+'/right/image_raw' - if LaunchConfiguration('rectify_rgb').perform(context)=='true': - rgb_topic_name = name +'/right/image_rect' + name = LaunchConfiguration("name").perform(context) + rgb_topic_name = name + "/right/image_raw" + if LaunchConfiguration("rectify_rgb").perform(context) == "true": + rgb_topic_name = name + "/right/image_rect" return [ IncludeLaunchDescription( PythonLaunchDescriptionSource( - os.path.join(depthai_prefix, 'launch', 'camera.launch.py')), - launch_arguments={"name": name, - "params_file": params_file, - "parent_frame": LaunchConfiguration("parent_frame"), - "cam_pos_x": LaunchConfiguration("cam_pos_x"), - "cam_pos_y": LaunchConfiguration("cam_pos_y"), - "cam_pos_z": LaunchConfiguration("cam_pos_z"), - "cam_roll": LaunchConfiguration("cam_roll"), - "cam_pitch": LaunchConfiguration("cam_pitch"), - "cam_yaw": LaunchConfiguration("cam_yaw"), - "use_rviz": LaunchConfiguration("use_rviz") - }.items()), - + os.path.join(depthai_prefix, "launch", "camera.launch.py") + ), + launch_arguments={ + "name": name, + "params_file": params_file, + "parent_frame": LaunchConfiguration("parent_frame"), + "cam_pos_x": LaunchConfiguration("cam_pos_x"), + "cam_pos_y": LaunchConfiguration("cam_pos_y"), + "cam_pos_z": LaunchConfiguration("cam_pos_z"), + "cam_roll": LaunchConfiguration("cam_roll"), + "cam_pitch": LaunchConfiguration("cam_pitch"), + "cam_yaw": LaunchConfiguration("cam_yaw"), + "use_rviz": LaunchConfiguration("use_rviz"), + }.items(), + ), LoadComposableNodes( condition=IfCondition(LaunchConfiguration("rectify_rgb")), - target_container=name+"_container", + target_container=name + "_container", composable_node_descriptions=[ - ComposableNode( - package="image_proc", - plugin="image_proc::RectifyNode", - name="rectify_color_node", - remappings=[('image', name+'/right/image_raw'), - ('camera_info', name+'/right/camera_info'), - ('image_rect', name+'/right/image_rect'), - ('image_rect/compressed', name+'/right/image_rect/compressed'), - ('image_rect/compressedDepth', name+'/right/image_rect/compressedDepth'), - ('image_rect/theora', 
name+'/right/image_rect/theora')] - ) - ]), + ComposableNode( + package="image_proc", + plugin="image_proc::RectifyNode", + name="rectify_color_node", + remappings=[ + ("image", name + "/right/image_raw"), + ("camera_info", name + "/right/camera_info"), + ("image_rect", name + "/right/image_rect"), + ( + "image_rect/compressed", + name + "/right/image_rect/compressed", + ), + ( + "image_rect/compressedDepth", + name + "/right/image_rect/compressedDepth", + ), + ("image_rect/theora", name + "/right/image_rect/theora"), + ], + ) + ], + ), LoadComposableNodes( - target_container=name+"_container", + target_container=name + "_container", composable_node_descriptions=[ - ComposableNode( - package='depth_image_proc', - plugin='depth_image_proc::PointCloudXyzrgbNode', - name='point_cloud_xyzrgb_node', - remappings=[('depth_registered/image_rect', name+'/stereo/image_raw'), - ('rgb/image_rect_color', rgb_topic_name), - ('rgb/camera_info', name+'/right/camera_info'), - ('points', name+'/points')] - ), + ComposableNode( + package="depth_image_proc", + plugin="depth_image_proc::PointCloudXyzrgbNode", + name="point_cloud_xyzrgb_node", + remappings=[ + ("depth_registered/image_rect", name + "/stereo/image_raw"), + ("rgb/image_rect_color", rgb_topic_name), + ("rgb/camera_info", name + "/right/camera_info"), + ("points", name + "/points"), + ], + ), ], ), ] @@ -79,7 +96,10 @@ def generate_launch_description(): DeclareLaunchArgument("cam_roll", default_value="0.0"), DeclareLaunchArgument("cam_pitch", default_value="0.0"), DeclareLaunchArgument("cam_yaw", default_value="0.0"), - DeclareLaunchArgument("params_file", default_value=os.path.join(depthai_prefix, 'config', 'sr_rgbd.yaml')), + DeclareLaunchArgument( + "params_file", + default_value=os.path.join(depthai_prefix, "config", "sr_rgbd.yaml"), + ), DeclareLaunchArgument("use_rviz", default_value="False"), DeclareLaunchArgument("rectify_rgb", default_value="False"), ] diff --git a/depthai_ros_driver/launch/stereo_from_rosbag.launch.py b/depthai_ros_driver/launch/stereo_from_rosbag.launch.py index e01233ae..d82d4438 100644 --- a/depthai_ros_driver/launch/stereo_from_rosbag.launch.py +++ b/depthai_ros_driver/launch/stereo_from_rosbag.launch.py @@ -2,31 +2,37 @@ from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription -from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, OpaqueFunction, ExecuteProcess +from launch.actions import ( + DeclareLaunchArgument, + ExecuteProcess, + IncludeLaunchDescription, + OpaqueFunction, +) from launch.launch_description_sources import PythonLaunchDescriptionSource from launch.substitutions import LaunchConfiguration def launch_setup(context, *args, **kwargs): - name = LaunchConfiguration('name').perform(context) + name = LaunchConfiguration("name").perform(context) depthai_prefix = get_package_share_directory("depthai_ros_driver") rosbag_path = LaunchConfiguration("rosbag_path").perform(context) rviz_config = os.path.join(depthai_prefix, "config", "rviz", "rgbd.rviz") - params_file= LaunchConfiguration("params_file") + params_file = LaunchConfiguration("params_file") return [ IncludeLaunchDescription( PythonLaunchDescriptionSource( - os.path.join(depthai_prefix, 'launch', 'camera.launch.py')), - launch_arguments={"name": name, - "params_file": params_file, - "use_rviz": LaunchConfiguration("use_rviz"), - "rviz_config": rviz_config}.items()), - ExecuteProcess( - cmd=['ros2', 'bag', 'play', '-l', rosbag_path], - output='screen' - ) + 
os.path.join(depthai_prefix, "launch", "camera.launch.py") + ), + launch_arguments={ + "name": name, + "params_file": params_file, + "use_rviz": LaunchConfiguration("use_rviz"), + "rviz_config": rviz_config, + }.items(), + ), + ExecuteProcess(cmd=["ros2", "bag", "play", "-l", rosbag_path], output="screen"), ] @@ -34,9 +40,14 @@ def generate_launch_description(): depthai_prefix = get_package_share_directory("depthai_ros_driver") declared_arguments = [ DeclareLaunchArgument("name", default_value="oak"), - DeclareLaunchArgument("params_file", default_value=os.path.join(depthai_prefix, 'config', 'stereo_from_rosbag.yaml')), + DeclareLaunchArgument( + "params_file", + default_value=os.path.join( + depthai_prefix, "config", "stereo_from_rosbag.yaml" + ), + ), DeclareLaunchArgument("use_rviz", default_value="True"), - DeclareLaunchArgument("rosbag_path", default_value="") + DeclareLaunchArgument("rosbag_path", default_value=""), ] return LaunchDescription( diff --git a/depthai_ros_driver/package.xml b/depthai_ros_driver/package.xml index f34b3597..553755f9 100644 --- a/depthai_ros_driver/package.xml +++ b/depthai_ros_driver/package.xml @@ -2,10 +2,9 @@ <name>depthai_ros_driver</name> - <version>2.9.0</version> + <version>2.10.0</version> <description>Depthai ROS Monolithic node.</description> <maintainer>Adam Serafin</maintainer> - <author>Sachin Guruswamy</author> <license>MIT</license> @@ -30,6 +29,7 @@ <depend>pluginlib</depend> <depend>diagnostic_updater</depend> <depend>diagnostic_msgs</depend> + <depend>ffmpeg_image_transport_msgs</depend> <build_type>ament_cmake</build_type> diff --git a/depthai_ros_driver/plugins.xml b/depthai_ros_driver/plugins.xml index 1dfd7d40..1de38c58 100644 --- a/depthai_ros_driver/plugins.xml +++ b/depthai_ros_driver/plugins.xml @@ -17,4 +17,16 @@ <description>CamArray Pipeline.</description> </class> -</library> \ No newline at end of file +</library> + <class type="depthai_ros_driver::pipeline_gen::DepthToF" base_class_type="depthai_ros_driver::pipeline_gen::BasePipeline"> + <description>Depth ToF Pipeline.</description> + </class> + <class type="depthai_ros_driver::pipeline_gen::StereoToF" base_class_type="depthai_ros_driver::pipeline_gen::BasePipeline"> + <description>Stereo ToF Pipeline.</description> + </class> + <class type="depthai_ros_driver::pipeline_gen::ToF" base_class_type="depthai_ros_driver::pipeline_gen::BasePipeline"> + <description>ToF Pipeline.</description> + </class> + <class type="depthai_ros_driver::pipeline_gen::RGBToF" base_class_type="depthai_ros_driver::pipeline_gen::BasePipeline"> + <description>RGB ToF Pipeline.</description> + </class> diff --git a/depthai_ros_driver/src/camera.cpp b/depthai_ros_driver/src/camera.cpp index 3e690a7b..4ca0da14 100644 --- a/depthai_ros_driver/src/camera.cpp +++ b/depthai_ros_driver/src/camera.cpp @@ -2,19 +2,23 @@ #include -#include "ament_index_cpp/get_package_share_directory.hpp" #include "depthai/device/Device.hpp" #include "depthai/pipeline/Pipeline.hpp" #include "depthai_bridge/TFPublisher.hpp" #include "depthai_ros_driver/pipeline/pipeline_generator.hpp" -#include "diagnostic_msgs/msg/diagnostic_status.hpp" +#include "diagnostic_msgs/msg/diagnostic_array.hpp" namespace depthai_ros_driver { Camera::Camera(const rclcpp::NodeOptions& options) : rclcpp::Node("camera", options) { - ph = std::make_unique<param_handlers::CameraParamHandler>(this, "camera"); - ph->declareParams(); - onConfigure(); + // Since we cannot use shared_from_this() before the object is initialized, we need to use a timer to start the device.
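// Background on the pattern below: shared_from_this() only works once the node is owned by a std::shared_ptr, which is not yet the case while the constructor runs; the one-shot wall timer defers parameter declaration and device bring-up until the executor owns the node, then cancels itself.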
+ startTimer = this->create_wall_timer(std::chrono::seconds(1), [this]() { + ph = std::make_unique<param_handlers::CameraParamHandler>(shared_from_this(), "camera"); + ph->declareParams(); + onConfigure(); + startTimer->cancel(); + }); + rclcpp::on_shutdown([this]() { stop(); }); } Camera::~Camera() = default; void Camera::onConfigure() { @@ -36,7 +40,7 @@ void Camera::onConfigure() { } if(ph->getParam<bool>("i_publish_tf_from_calibration")) { - tfPub = std::make_unique<dai::ros::TFPublisher>(this, + tfPub = std::make_unique<dai::ros::TFPublisher>(shared_from_this(), device->readCalibration(), device->getConnectedCameraFeatures(), ph->getParam<std::string>("i_tf_camera_name"), @@ -51,17 +55,18 @@ void Camera::onConfigure() { ph->getParam<std::string>("i_tf_cam_yaw"), ph->getParam<std::string>("i_tf_imu_from_descr"), ph->getParam<std::string>("i_tf_custom_urdf_location"), - ph->getParam<std::string>("i_tf_custom_xacro_args")); + ph->getParam<std::string>("i_tf_custom_xacro_args"), + ph->getParam<bool>("i_rs_compat")); } diagSub = this->create_subscription<diagnostic_msgs::msg::DiagnosticArray>("/diagnostics", 10, std::bind(&Camera::diagCB, this, std::placeholders::_1)); - RCLCPP_INFO(this->get_logger(), "Camera ready!"); + RCLCPP_INFO(get_logger(), "Camera ready!"); } void Camera::diagCB(const diagnostic_msgs::msg::DiagnosticArray::SharedPtr msg) { for(const auto& status : msg->status) { if(status.name == get_name() + std::string(": sys_logger")) { if(status.level == diagnostic_msgs::msg::DiagnosticStatus::ERROR) { - RCLCPP_ERROR(this->get_logger(), "Camera diagnostics error: %s", status.message.c_str()); + RCLCPP_ERROR(get_logger(), "Camera diagnostics error: %s", status.message.c_str()); if(ph->getParam<bool>("i_restart_on_diagnostics_error")) { restart(); }; @@ -71,16 +76,16 @@ } void Camera::start() { - RCLCPP_INFO(this->get_logger(), "Starting camera."); + RCLCPP_INFO(get_logger(), "Starting camera."); if(!camRunning) { onConfigure(); } else { - RCLCPP_INFO(this->get_logger(), "Camera already running!."); + RCLCPP_INFO(get_logger(), "Camera already running!."); } } void Camera::stop() { - RCLCPP_INFO(this->get_logger(), "Stopping camera."); + RCLCPP_INFO(get_logger(), "Stopping camera."); if(camRunning) { for(const auto& node : daiNodes) { node->closeQueues(); @@ -90,18 +95,18 @@ } pipeline.reset(); camRunning = false; } else { - RCLCPP_INFO(this->get_logger(), "Camera already stopped!"); + RCLCPP_INFO(get_logger(), "Camera already stopped!"); } } void Camera::restart() { - RCLCPP_ERROR(this->get_logger(), "Restarting camera"); + RCLCPP_ERROR(get_logger(), "Restarting camera"); stop(); start(); if(camRunning) { return; } else { - RCLCPP_ERROR(this->get_logger(), "Restarting camera failed."); + RCLCPP_ERROR(get_logger(), "Restarting camera failed."); } } @@ -109,13 +114,13 @@ void Camera::saveCalib() { auto calibHandler = device->readCalibration(); std::stringstream savePath; savePath << "/tmp/" << device->getMxId().c_str() << "_calibration.json"; - RCLCPP_INFO(this->get_logger(), "Saving calibration to: %s", savePath.str().c_str()); + RCLCPP_INFO(get_logger(), "Saving calibration to: %s", savePath.str().c_str()); calibHandler.eepromToJsonFile(savePath.str()); auto json = calibHandler.eepromToJson(); } void Camera::loadCalib(const std::string& path) { - RCLCPP_INFO(this->get_logger(), "Reading calibration from: %s", path.c_str()); + RCLCPP_INFO(get_logger(), "Reading calibration from: %s", path.c_str()); dai::CalibrationHandler cH(path); pipeline->setCalibrationData(cH); } @@ -128,7 +133,7 @@ void Camera::saveCalibCB(const Trigger::Request::SharedPtr /*req*/, Trigger::Res void Camera::savePipeline() {
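// Dumps the serialized pipeline schema to /tmp/<MxID>_pipeline.json (see the body below) so the resulting graph can be inspected offline.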
std::stringstream savePath; savePath << "/tmp/" << device->getMxId().c_str() << "_pipeline.json"; - RCLCPP_INFO(this->get_logger(), "Saving pipeline schema to: %s", savePath.str().c_str()); + RCLCPP_INFO(get_logger(), "Saving pipeline schema to: %s", savePath.str().c_str()); std::ofstream file(savePath.str()); file << pipeline->serializeToJson()["pipeline"]; file.close(); @@ -151,15 +156,15 @@ void Camera::getDeviceType() { pipeline = std::make_shared<dai::Pipeline>(); startDevice(); auto name = device->getDeviceName(); - RCLCPP_INFO(this->get_logger(), "Device type: %s", name.c_str()); + RCLCPP_INFO(get_logger(), "Device type: %s", name.c_str()); for(auto& sensor : device->getCameraSensorNames()) { - RCLCPP_DEBUG(this->get_logger(), "Socket %d - %s", static_cast<int>(sensor.first), sensor.second.c_str()); + RCLCPP_DEBUG(get_logger(), "Socket %d - %s", static_cast<int>(sensor.first), sensor.second.c_str()); } auto ir_drivers = device->getIrDrivers(); if(ir_drivers.empty()) { - RCLCPP_DEBUG(this->get_logger(), "Device has no IR drivers"); + RCLCPP_DEBUG(get_logger(), "Device has no IR drivers"); } else { - RCLCPP_DEBUG(this->get_logger(), "IR Drivers present"); + RCLCPP_DEBUG(get_logger(), "IR Drivers present"); } } @@ -168,8 +173,8 @@ void Camera::createPipeline() { if(!ph->getParam<std::string>("i_external_calibration_path").empty()) { loadCalib(ph->getParam<std::string>("i_external_calibration_path")); } - daiNodes = generator->createPipeline( - this, device, pipeline, ph->getParam<std::string>("i_pipeline_type"), ph->getParam<std::string>("i_nn_type"), ph->getParam<bool>("i_enable_imu")); + daiNodes = + generator->createPipeline(shared_from_this(), device, pipeline, ph->getParam<std::string>("i_pipeline_type"), ph->getParam<std::string>("i_nn_type")); if(ph->getParam<bool>("i_pipeline_dump")) { savePipeline(); } @@ -192,18 +197,25 @@ void Camera::startDevice() { auto usb_id = ph->getParam<std::string>("i_usb_port_id"); try { if(mxid.empty() && ip.empty() && usb_id.empty()) { - RCLCPP_INFO(this->get_logger(), "No ip/mxid specified, connecting to the next available device."); + RCLCPP_INFO(get_logger(), "No ip/mxid specified, connecting to the next available device."); device = std::make_shared<dai::Device>(); camRunning = true; } else { std::vector<dai::DeviceInfo> availableDevices = dai::Device::getAllAvailableDevices(); if(availableDevices.size() == 0) { - throw std::runtime_error("No devices detected!"); + // autodiscovery might not work so try connecting via IP directly if set + if(!ip.empty()) { + dai::DeviceInfo info(ip); + RCLCPP_INFO(get_logger(), "No devices detected by autodiscovery, trying to connect to camera via IP: %s", ip.c_str()); + availableDevices.push_back(info); + } else { + throw std::runtime_error("No devices detected!"); + } } dai::UsbSpeed speed = ph->getUSBSpeed(); for(const auto& info : availableDevices) { if(!mxid.empty() && info.getMxId() == mxid) { - RCLCPP_INFO(this->get_logger(), "Connecting to the camera using mxid: %s", mxid.c_str()); + RCLCPP_INFO(get_logger(), "Connecting to the camera using mxid: %s", mxid.c_str()); if(info.state == X_LINK_UNBOOTED || info.state == X_LINK_BOOTLOADER) { device = std::make_shared<dai::Device>(info, speed); camRunning = true; @@ -211,15 +223,15 @@ } else if(!ip.empty() && info.name == ip) { - RCLCPP_INFO(this->get_logger(), "Connecting to the camera using ip: %s", ip.c_str()); + RCLCPP_INFO(get_logger(), "Connecting to the camera using ip: %s", ip.c_str()); if(info.state == X_LINK_UNBOOTED || info.state == X_LINK_BOOTLOADER) { device = std::make_shared<dai::Device>(info); camRunning = true;
} else if(info.state == X_LINK_BOOTED) { - throw std::runtime_error("Device is already booted in different process."); + throw std::runtime_error("Device is already booted in a different process."); } } else if(!usb_id.empty() && info.name == usb_id) { - RCLCPP_INFO(this->get_logger(), "Connecting to the camera using USB ID: %s", usb_id.c_str()); + RCLCPP_INFO(get_logger(), "Connecting to the camera using USB ID: %s", usb_id.c_str()); if(info.state == X_LINK_UNBOOTED || info.state == X_LINK_BOOTLOADER) { device = std::make_shared<dai::Device>(info, speed); camRunning = true; @@ -227,33 +239,43 @@ void Camera::startDevice() { throw std::runtime_error("Device is already booted in different process."); } } else { - RCLCPP_INFO(this->get_logger(), "Ignoring device info: MXID: %s, Name: %s", info.getMxId().c_str(), info.name.c_str()); + RCLCPP_INFO(get_logger(), "Ignoring device info: MXID: %s, Name: %s", info.getMxId().c_str(), info.name.c_str()); } } } catch(const std::runtime_error& e) { - RCLCPP_ERROR(this->get_logger(), "%s", e.what()); + RCLCPP_ERROR(get_logger(), "%s", e.what()); } r.sleep(); } - RCLCPP_INFO(this->get_logger(), "Camera with MXID: %s and Name: %s connected!", device->getMxId().c_str(), device->getDeviceInfo().name.c_str()); + RCLCPP_INFO(get_logger(), "Camera with MXID: %s and Name: %s connected!", device->getMxId().c_str(), device->getDeviceInfo().name.c_str()); auto protocol = device->getDeviceInfo().getXLinkDeviceDesc().protocol; if(protocol != XLinkProtocol_t::X_LINK_TCP_IP) { auto speed = usbStrings[static_cast<int>(device->getUsbSpeed())]; - RCLCPP_INFO(this->get_logger(), "USB SPEED: %s", speed.c_str()); + RCLCPP_INFO(get_logger(), "USB SPEED: %s", speed.c_str()); } else { - RCLCPP_INFO(this->get_logger(), + RCLCPP_INFO(get_logger(), "PoE camera detected. 
Consider enabling low bandwidth for specific image topics (see " - "readme)."); + "Readme->DepthAI ROS Driver->Specific camera configurations)."); } } void Camera::setIR() { if(ph->getParam<bool>("i_enable_ir") && !device->getIrDrivers().empty()) { - device->setIrLaserDotProjectorBrightness(ph->getParam<int>("i_laser_dot_brightness")); - device->setIrFloodLightBrightness(ph->getParam<int>("i_floodlight_brightness")); + // Normalize laserdot brightness to 0-1 range, max value can be 1200mA + float laserdotBrightness = float(ph->getParam<int>("i_laser_dot_brightness")); + if(laserdotBrightness > 1.0) { + laserdotBrightness = laserdotBrightness / 1200.0; + } + // Normalize floodlight brightness to 0-1 range, max value can be 1500mA + float floodlightBrightness = float(ph->getParam<int>("i_floodlight_brightness")); + if(floodlightBrightness > 1.0) { + floodlightBrightness = floodlightBrightness / 1500.0; + } + device->setIrLaserDotProjectorIntensity(laserdotBrightness); + device->setIrFloodLightIntensity(floodlightBrightness); } } @@ -261,9 +283,17 @@ rcl_interfaces::msg::SetParametersResult Camera::parameterCB(const std::vector<rclcpp::Parameter>& params) { for(const auto& p : params) { if(ph->getParam<bool>("i_enable_ir") && !device->getIrDrivers().empty()) { if(p.get_name() == ph->getFullParamName("i_laser_dot_brightness")) { - device->setIrLaserDotProjectorBrightness(p.get_value<int>()); + float laserdotBrightness = float(p.get_value<int>()); + if(laserdotBrightness > 1.0) { + laserdotBrightness = laserdotBrightness / 1200.0; // normalize to 0-1 as in setIR(), e.g. 600 mA -> 0.5 + } + device->setIrLaserDotProjectorIntensity(laserdotBrightness); } else if(p.get_name() == ph->getFullParamName("i_floodlight_brightness")) { - device->setIrFloodLightBrightness(p.get_value<int>()); + float floodlightBrightness = float(p.get_value<int>()); + if(floodlightBrightness > 1.0) { + floodlightBrightness = floodlightBrightness / 1500.0; + } + device->setIrFloodLightIntensity(floodlightBrightness); } } } @@ -277,4 +307,4 @@ rcl_interfaces::msg::SetParametersResult Camera::parameterCB(const std::vector(); + + rclcpp::spin(cam); + rclcpp::shutdown(); + return 0; +} diff --git a/depthai_ros_driver/src/dai_nodes/base_node.cpp b/depthai_ros_driver/src/dai_nodes/base_node.cpp index c57fa733..b6376bfa 100644 --- a/depthai_ros_driver/src/dai_nodes/base_node.cpp +++ b/depthai_ros_driver/src/dai_nodes/base_node.cpp @@ -3,23 +3,27 @@ #include "depthai-shared/common/CameraBoardSocket.hpp" #include "depthai/device/Device.hpp" #include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/node/VideoEncoder.hpp" +#include "depthai/pipeline/node/XLinkOut.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/img_pub.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" +#include "depthai_ros_driver/utils.hpp" #include "rclcpp/node.hpp" namespace depthai_ros_driver { namespace dai_nodes { -BaseNode::BaseNode(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr<dai::Pipeline> /*pipeline*/) { - setNodeName(daiNodeName); - setROSNodePointer(node); +BaseNode::BaseNode(const std::string& daiNodeName, std::shared_ptr<rclcpp::Node> node, std::shared_ptr<dai::Pipeline> /*pipeline*/) + : baseNode(node), baseDAINodeName(daiNodeName), logger(node->get_logger()) { intraProcessEnabled = node->get_node_options().use_intra_process_comms(); }; BaseNode::~BaseNode() = default; void BaseNode::setNodeName(const std::string& daiNodeName) { baseDAINodeName = daiNodeName; }; -void BaseNode::setROSNodePointer(rclcpp::Node* node) { +void BaseNode::setROSNodePointer(std::shared_ptr<rclcpp::Node> node) { baseNode = node; }; -rclcpp::Node* BaseNode::getROSNode() { +std::shared_ptr<rclcpp::Node> BaseNode::getROSNode() { return baseNode; }; std::string
BaseNode::getName() { @@ -29,19 +33,53 @@ std::string BaseNode::getName() { bool BaseNode::ipcEnabled() { return intraProcessEnabled; } +rclcpp::Logger BaseNode::getLogger() { + return logger; +} + +bool BaseNode::rsCompabilityMode() { + return sensor_helpers::rsCompabilityMode(getROSNode()); +} + +std::string BaseNode::getSocketName(dai::CameraBoardSocket socket) { + return sensor_helpers::getSocketName(getROSNode(), socket); +} std::string BaseNode::getTFPrefix(const std::string& frameName) { return std::string(getROSNode()->get_name()) + "_" + frameName; } +std::string BaseNode::getOpticalTFPrefix(const std::string& frameName) { + std::string suffix = "_camera_optical_frame"; + if(sensor_helpers::rsCompabilityMode(getROSNode())) { + suffix = "_optical_frame"; + } + return getTFPrefix(frameName) + suffix; +} dai::Node::Input BaseNode::getInput(int /*linkType = 0*/) { throw(std::runtime_error("getInput() not implemented")); }; +dai::Node::Input BaseNode::getInputByName(const std::string& /*name*/) { + throw(std::runtime_error("getInputByName() not implemented")); +}; + void BaseNode::closeQueues() { throw(std::runtime_error("closeQueues() not implemented")); }; +std::shared_ptr BaseNode::setupXout(std::shared_ptr pipeline, const std::string& name) { + return utils::setupXout(pipeline, name); +}; + +std::shared_ptr BaseNode::setupOutput(std::shared_ptr pipeline, + const std::string& qName, + std::function nodeLink, + bool isSynced, + const utils::VideoEncoderConfig& encoderConfig) { + return std::make_shared(getROSNode(), pipeline, qName, nodeLink, isSynced, ipcEnabled(), encoderConfig); +}; + void BaseNode::setNames() { throw(std::runtime_error("setNames() not implemented")); }; @@ -58,8 +96,11 @@ void BaseNode::link(dai::Node::Input /*in*/, int /*linkType = 0*/) { throw(std::runtime_error("link() not implemented")); }; +std::vector> BaseNode::getPublishers() { + return std::vector>(); +}; void BaseNode::updateParams(const std::vector& /*params*/) { return; }; } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/src/dai_nodes/nn/nn_wrapper.cpp b/depthai_ros_driver/src/dai_nodes/nn/nn_wrapper.cpp index 0f7561e4..b7d56f80 100644 --- a/depthai_ros_driver/src/dai_nodes/nn/nn_wrapper.cpp +++ b/depthai_ros_driver/src/dai_nodes/nn/nn_wrapper.cpp @@ -10,7 +10,10 @@ namespace depthai_ros_driver { namespace dai_nodes { -NNWrapper::NNWrapper(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr pipeline, const dai::CameraBoardSocket& socket) +NNWrapper::NNWrapper(const std::string& daiNodeName, + std::shared_ptr node, + std::shared_ptr pipeline, + const dai::CameraBoardSocket& socket) : BaseNode(daiNodeName, node, pipeline) { RCLCPP_DEBUG(node->get_logger(), "Creating node %s base", daiNodeName.c_str()); ph = std::make_unique(node, daiNodeName, socket); diff --git a/depthai_ros_driver/src/dai_nodes/nn/segmentation.cpp b/depthai_ros_driver/src/dai_nodes/nn/segmentation.cpp index 627ad259..22cd14ef 100644 --- a/depthai_ros_driver/src/dai_nodes/nn/segmentation.cpp +++ b/depthai_ros_driver/src/dai_nodes/nn/segmentation.cpp @@ -23,15 +23,18 @@ namespace depthai_ros_driver { namespace dai_nodes { namespace nn { -Segmentation::Segmentation(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr pipeline, const dai::CameraBoardSocket& socket) +Segmentation::Segmentation(const std::string& daiNodeName, + std::shared_ptr node, + std::shared_ptr pipeline, + const 
dai::CameraBoardSocket& socket) : BaseNode(daiNodeName, node, pipeline) { - RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Creating node %s", daiNodeName.c_str()); setNames(); segNode = pipeline->create(); imageManip = pipeline->create(); ph = std::make_unique(node, daiNodeName, socket); ph->declareParams(segNode, imageManip); - RCLCPP_DEBUG(node->get_logger(), "Node %s created", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Node %s created", daiNodeName.c_str()); imageManip->out.link(segNode->input); setXinXout(pipeline); } @@ -56,22 +59,22 @@ void Segmentation::setXinXout(std::shared_ptr pipeline) { void Segmentation::setupQueues(std::shared_ptr device) { nnQ = device->getOutputQueue(nnQName, ph->getParam("i_max_q_size"), false); - nnPub = image_transport::create_camera_publisher(getROSNode(), "~/" + getName() + "/image_raw"); + nnPub = image_transport::create_camera_publisher(getROSNode().get(), "~/" + getName() + "/image_raw"); nnQ->addCallback(std::bind(&Segmentation::segmentationCB, this, std::placeholders::_1, std::placeholders::_2)); if(ph->getParam("i_enable_passthrough")) { - auto tfPrefix = getTFPrefix(utils::getSocketName(static_cast(ph->getParam("i_board_socket_id")))); + auto tfPrefix = getOpticalTFPrefix(getSocketName(static_cast(ph->getParam("i_board_socket_id")))); ptQ = device->getOutputQueue(ptQName, ph->getParam("i_max_q_size"), false); - imageConverter = std::make_unique(tfPrefix + "_camera_optical_frame", false); + imageConverter = std::make_unique(tfPrefix, false); infoManager = std::make_shared( getROSNode()->create_sub_node(std::string(getROSNode()->get_name()) + "/" + getName()).get(), "/" + getName()); infoManager->setCameraInfo(sensor_helpers::getCalibInfo(getROSNode()->get_logger(), - *imageConverter, + imageConverter, device, dai::CameraBoardSocket::CAM_A, imageManip->initialConfig.getResizeWidth(), imageManip->initialConfig.getResizeWidth())); - ptPub = image_transport::create_camera_publisher(getROSNode(), "~/" + getName() + "/passthrough/image_raw"); + ptPub = image_transport::create_camera_publisher(getROSNode().get(), "~/" + getName() + "/passthrough/image_raw"); ptQ->addCallback(std::bind(sensor_helpers::basicCameraPub, std::placeholders::_1, std::placeholders::_2, *imageConverter, ptPub, infoManager)); } } @@ -94,7 +97,8 @@ void Segmentation::segmentationCB(const std::string& /*name*/, const std::shared sensor_msgs::msg::Image img_msg; std_msgs::msg::Header header; header.stamp = getROSNode()->get_clock()->now(); - header.frame_id = std::string(getROSNode()->get_name()) + "_rgb_camera_optical_frame"; + auto tfPrefix = getOpticalTFPrefix(getSocketName(static_cast(ph->getParam("i_board_socket_id")))); + header.frame_id = tfPrefix; nnInfo.header = header; imgBridge = cv_bridge::CvImage(header, sensor_msgs::image_encodings::BGR8, cv_frame); imgBridge.toImageMsg(img_msg); @@ -133,4 +137,4 @@ void Segmentation::updateParams(const std::vector& params) { } } // namespace nn } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/src/dai_nodes/nn/spatial_nn_wrapper.cpp b/depthai_ros_driver/src/dai_nodes/nn/spatial_nn_wrapper.cpp index 938cd6fa..16443492 100644 --- a/depthai_ros_driver/src/dai_nodes/nn/spatial_nn_wrapper.cpp +++ b/depthai_ros_driver/src/dai_nodes/nn/spatial_nn_wrapper.cpp @@ -10,7 +10,7 @@ namespace depthai_ros_driver { namespace dai_nodes { SpatialNNWrapper::SpatialNNWrapper(const 
std::string& daiNodeName, - rclcpp::Node* node, + std::shared_ptr node, std::shared_ptr pipeline, const dai::CameraBoardSocket& socket) : BaseNode(daiNodeName, node, pipeline) { diff --git a/depthai_ros_driver/src/dai_nodes/sensors/feature_tracker.cpp b/depthai_ros_driver/src/dai_nodes/sensors/feature_tracker.cpp index a66eedaf..5af9c212 100644 --- a/depthai_ros_driver/src/dai_nodes/sensors/feature_tracker.cpp +++ b/depthai_ros_driver/src/dai_nodes/sensors/feature_tracker.cpp @@ -13,16 +13,16 @@ namespace depthai_ros_driver { namespace dai_nodes { -FeatureTracker::FeatureTracker(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr pipeline) +FeatureTracker::FeatureTracker(const std::string& daiNodeName, std::shared_ptr node, std::shared_ptr pipeline) : BaseNode(daiNodeName, node, pipeline) { - RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Creating node %s", daiNodeName.c_str()); getParentName(daiNodeName); setNames(); featureNode = pipeline->create(); ph = std::make_unique(node, daiNodeName); ph->declareParams(featureNode); setXinXout(pipeline); - RCLCPP_DEBUG(node->get_logger(), "Node %s created", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Node %s created", daiNodeName.c_str()); } FeatureTracker::~FeatureTracker() = default; diff --git a/depthai_ros_driver/src/dai_nodes/sensors/img_pub.cpp b/depthai_ros_driver/src/dai_nodes/sensors/img_pub.cpp new file mode 100644 index 00000000..70f9fb28 --- /dev/null +++ b/depthai_ros_driver/src/dai_nodes/sensors/img_pub.cpp @@ -0,0 +1,225 @@ +#include "depthai_ros_driver/dai_nodes/sensors/img_pub.hpp" + +#include + +#include "camera_info_manager/camera_info_manager.hpp" +#include "depthai/device/Device.hpp" +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/node/VideoEncoder.hpp" +#include "depthai/pipeline/node/XLinkOut.hpp" +#include "depthai_bridge/ImageConverter.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" +#include "depthai_ros_driver/utils.hpp" +#include "image_transport/image_transport.hpp" + +namespace depthai_ros_driver { +namespace dai_nodes { +namespace sensor_helpers { +ImagePublisher::ImagePublisher(std::shared_ptr node, + std::shared_ptr pipeline, + const std::string& qName, + std::function linkFunc, + bool synced, + bool ipcEnabled, + const utils::VideoEncoderConfig& encoderConfig) + : node(node), encConfig(encoderConfig), qName(qName), ipcEnabled(ipcEnabled), synced(synced) { + if(!synced) { + xout = utils::setupXout(pipeline, qName); + } + + linkCB = linkFunc; + if(encoderConfig.enabled) { + encoder = createEncoder(pipeline, encoderConfig); + linkFunc(encoder->input); + + if(!synced) { + if(encoderConfig.profile == dai::VideoEncoderProperties::Profile::MJPEG) { + encoder->bitstream.link(xout->input); + } else { + encoder->out.link(xout->input); + } + } else { + if(encoderConfig.profile == dai::VideoEncoderProperties::Profile::MJPEG) { + linkCB = [&](dai::Node::Input input) { encoder->bitstream.link(input); }; + } else { + linkCB = [&](dai::Node::Input input) { encoder->out.link(input); }; + } + } + } else { + if(!synced) { + linkFunc(xout->input); + } + } +} +void ImagePublisher::setup(std::shared_ptr device, const utils::ImgConverterConfig& convConf, const utils::ImgPublisherConfig& pubConf) { + convConfig = convConf; + pubConfig = pubConf; + createImageConverter(device); + createInfoManager(device); + if(pubConfig.topicName.empty()) { + throw std::runtime_error("Topic name cannot be empty!"); + } + 
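+ // The branches below choose the publisher type: MJPEG-encoded streams are published as
+ // CompressedImage, other encoder profiles as FFMPEGPacket, and raw frames go out either
+ // via plain rclcpp publishers (usable with intra-process comms) or image_transport.
+ 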
rclcpp::PublisherOptions pubOptions; + pubOptions.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); + if(pubConfig.publishCompressed) { + if(encConfig.profile == dai::VideoEncoderProperties::Profile::MJPEG) { + compressedImgPub = + node->create_publisher(pubConfig.topicName + pubConfig.compressedTopicSuffix, rclcpp::QoS(10), pubOptions); + } else { + ffmpegPub = node->create_publisher( + pubConfig.topicName + pubConfig.compressedTopicSuffix, rclcpp::QoS(10), pubOptions); + } + infoPub = node->create_publisher(pubConfig.topicName + "/camera_info", rclcpp::QoS(10), pubOptions); + } else if(ipcEnabled) { + imgPub = node->create_publisher(pubConfig.topicName + pubConfig.topicSuffix, rclcpp::QoS(10), pubOptions); + infoPub = node->create_publisher(pubConfig.topicName + "/camera_info", rclcpp::QoS(10), pubOptions); + } else { + imgPubIT = image_transport::create_camera_publisher(node.get(), pubConfig.topicName + pubConfig.topicSuffix); + } + if(!synced) { + dataQ = device->getOutputQueue(getQueueName(), pubConf.maxQSize, pubConf.qBlocking); + addQueueCB(dataQ); + } +} + +void ImagePublisher::createImageConverter(std::shared_ptr device) { + converter = std::make_shared(convConfig.tfPrefix, convConfig.interleaved, convConfig.getBaseDeviceTimestamp); + converter->setUpdateRosBaseTimeOnToRosMsg(convConfig.updateROSBaseTimeOnRosMsg); + if(convConfig.lowBandwidth) { + converter->convertFromBitstream(convConfig.encoding); + } + if(convConfig.addExposureOffset) { + converter->addExposureOffset(convConfig.expOffset); + } + if(convConfig.reverseSocketOrder) { + converter->reverseStereoSocketOrder(); + } + if(convConfig.alphaScalingEnabled) { + converter->setAlphaScaling(convConfig.alphaScaling); + } + if(convConfig.outputDisparity) { + auto calHandler = device->readCalibration(); + double baseline = calHandler.getBaselineDistance(pubConfig.leftSocket, pubConfig.rightSocket, false); + if(convConfig.reverseSocketOrder) { + baseline = calHandler.getBaselineDistance(pubConfig.rightSocket, pubConfig.leftSocket, false); + } + converter->convertDispToDepth(baseline); + } + converter->setFFMPEGEncoding(convConfig.ffmpegEncoder); +} + +std::shared_ptr ImagePublisher::createEncoder(std::shared_ptr pipeline, + const utils::VideoEncoderConfig& encoderConfig) { + auto enc = pipeline->create(); + enc->setQuality(encoderConfig.quality); + enc->setProfile(encoderConfig.profile); + enc->setBitrate(encoderConfig.bitrate); + enc->setKeyframeFrequency(encoderConfig.frameFreq); + return enc; +} +void ImagePublisher::createInfoManager(std::shared_ptr device) { + infoManager = std::make_shared( + node->create_sub_node(std::string(node->get_name()) + "/" + pubConfig.daiNodeName).get(), "/" + pubConfig.daiNodeName + pubConfig.infoMgrSuffix); + if(pubConfig.calibrationFile.empty()) { + auto info = sensor_helpers::getCalibInfo(node->get_logger(), converter, device, pubConfig.socket, pubConfig.width, pubConfig.height); + if(pubConfig.rectified) { + std::fill(info.d.begin(), info.d.end(), 0.0); + std::fill(info.k.begin(), info.k.end(), 0.0); + info.r[0] = info.r[4] = info.r[8] = 1.0; + } + infoManager->setCameraInfo(info); + } else { + infoManager->loadCameraInfo(pubConfig.calibrationFile); + } +}; +ImagePublisher::~ImagePublisher() { + closeQueue(); +}; + +void ImagePublisher::closeQueue() { + if(dataQ) dataQ->close(); +} +void ImagePublisher::link(dai::Node::Input in) { + linkCB(in); +} +std::shared_ptr ImagePublisher::getQueue() { + return dataQ; +} +void ImagePublisher::addQueueCB(const 
std::shared_ptr& queue) { + dataQ = queue; + qName = queue->getName(); + cbID = dataQ->addCallback([this](const std::shared_ptr& data) { publish(data); }); +} + +std::string ImagePublisher::getQueueName() { + return qName; +} +std::shared_ptr ImagePublisher::convertData(const std::shared_ptr& data) { + auto info = infoManager->getCameraInfo(); + auto img = std::make_shared(); + if(pubConfig.publishCompressed) { + if(encConfig.profile == dai::VideoEncoderProperties::Profile::MJPEG) { + auto daiImg = std::dynamic_pointer_cast(data); + auto rawMsg = converter->toRosCompressedMsg(daiImg); + img->compressedImg = std::make_unique(rawMsg); + } else { + auto daiImg = std::dynamic_pointer_cast(data); + auto rawMsg = converter->toRosFFMPEGPacket(daiImg); + img->ffmpegPacket = std::make_unique(rawMsg); + } + } else { + auto daiImg = std::dynamic_pointer_cast(data); + auto rawMsg = converter->toRosMsgRawPtr(daiImg, info); + info.header = rawMsg.header; + sensor_msgs::msg::Image::UniquePtr msg = std::make_unique(rawMsg); + img->image = std::move(msg); + } + sensor_msgs::msg::CameraInfo::UniquePtr infoMsg = std::make_unique(info); + img->info = std::move(infoMsg); + return img; +} +void ImagePublisher::publish(std::shared_ptr img) { + if(pubConfig.publishCompressed) { + if(encConfig.profile == dai::VideoEncoderProperties::Profile::MJPEG) { + compressedImgPub->publish(std::move(img->compressedImg)); + } else { + ffmpegPub->publish(std::move(img->ffmpegPacket)); + } + } else { + if(ipcEnabled && (!pubConfig.lazyPub || detectSubscription(imgPub, infoPub))) { + imgPub->publish(std::move(img->image)); + infoPub->publish(std::move(img->info)); + } else { + if(!pubConfig.lazyPub || imgPubIT.getNumSubscribers() > 0) imgPubIT.publish(*img->image, *img->info); + } + } +} +void ImagePublisher::publish(std::shared_ptr img, rclcpp::Time timestamp) { + img->info->header.stamp = timestamp; + if(pubConfig.publishCompressed) { + if(encConfig.profile == dai::VideoEncoderProperties::Profile::MJPEG) { + img->compressedImg->header.stamp = timestamp; + } else { + img->ffmpegPacket->header.stamp = timestamp; + } + } else { + img->image->header.stamp = timestamp; + } + publish(img); +} + +void ImagePublisher::publish(const std::shared_ptr& data) { + if(rclcpp::ok()) { + auto img = convertData(data); + publish(img); + } +} + +bool ImagePublisher::detectSubscription(const rclcpp::Publisher::SharedPtr& pub, + const rclcpp::Publisher::SharedPtr& infoPub) { + return (pub->get_subscription_count() > 0 || pub->get_intra_process_subscription_count() > 0 || infoPub->get_subscription_count() > 0 + || infoPub->get_intra_process_subscription_count() > 0); +} +} // namespace sensor_helpers +} // namespace dai_nodes +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/src/dai_nodes/sensors/imu.cpp b/depthai_ros_driver/src/dai_nodes/sensors/imu.cpp index db305149..9cc6bd48 100644 --- a/depthai_ros_driver/src/dai_nodes/sensors/imu.cpp +++ b/depthai_ros_driver/src/dai_nodes/sensors/imu.cpp @@ -13,15 +13,15 @@ namespace depthai_ros_driver { namespace dai_nodes { -Imu::Imu(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr pipeline, std::shared_ptr device) +Imu::Imu(const std::string& daiNodeName, std::shared_ptr node, std::shared_ptr pipeline, std::shared_ptr device) : BaseNode(daiNodeName, node, pipeline) { - RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Creating node %s", daiNodeName.c_str()); setNames(); imuNode = pipeline->create(); ph = 
std::make_unique(node, daiNodeName); ph->declareParams(imuNode, device->getConnectedIMU()); setXinXout(pipeline); - RCLCPP_DEBUG(node->get_logger(), "Node %s created", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Node %s created", daiNodeName.c_str()); } Imu::~Imu() = default; void Imu::setNames() { @@ -52,19 +52,23 @@ void Imu::setupQueues(std::shared_ptr device) { enableMagn, ph->getParam("i_get_base_device_timestamp")); imuConverter->setUpdateRosBaseTimeOnToRosMsg(ph->getParam("i_update_ros_base_time_on_ros_msg")); + std::string topicSuffix = "/data"; + if(rsCompabilityMode()) { + topicSuffix = ""; + } switch(msgType) { case param_handlers::imu::ImuMsgType::IMU: { - rosImuPub = getROSNode()->create_publisher("~/" + getName() + "/data", 10, options); + rosImuPub = getROSNode()->create_publisher("~/" + getName() + topicSuffix, 10, options); imuQ->addCallback(std::bind(&Imu::imuRosQCB, this, std::placeholders::_1, std::placeholders::_2)); break; } case param_handlers::imu::ImuMsgType::IMU_WITH_MAG: { - daiImuPub = getROSNode()->create_publisher("~/" + getName() + "/data", 10, options); + daiImuPub = getROSNode()->create_publisher("~/" + getName() + topicSuffix, 10, options); imuQ->addCallback(std::bind(&Imu::imuDaiRosQCB, this, std::placeholders::_1, std::placeholders::_2)); break; } case param_handlers::imu::ImuMsgType::IMU_WITH_MAG_SPLIT: { - rosImuPub = getROSNode()->create_publisher("~/" + getName() + "/data", 10, options); + rosImuPub = getROSNode()->create_publisher("~/" + getName() + topicSuffix, 10, options); magPub = getROSNode()->create_publisher("~/" + getName() + "/mag", 10, options); imuQ->addCallback(std::bind(&Imu::imuMagQCB, this, std::placeholders::_1, std::placeholders::_2)); break; diff --git a/depthai_ros_driver/src/dai_nodes/sensors/mono.cpp b/depthai_ros_driver/src/dai_nodes/sensors/mono.cpp index e71b5495..9174e854 100644 --- a/depthai_ros_driver/src/dai_nodes/sensors/mono.cpp +++ b/depthai_ros_driver/src/dai_nodes/sensors/mono.cpp @@ -1,37 +1,31 @@ #include "depthai_ros_driver/dai_nodes/sensors/mono.hpp" -#include "camera_info_manager/camera_info_manager.hpp" -#include "depthai/device/DataQueue.hpp" #include "depthai/device/Device.hpp" #include "depthai/pipeline/Pipeline.hpp" #include "depthai/pipeline/node/MonoCamera.hpp" -#include "depthai/pipeline/node/VideoEncoder.hpp" #include "depthai/pipeline/node/XLinkIn.hpp" -#include "depthai/pipeline/node/XLinkOut.hpp" -#include "depthai_bridge/ImageConverter.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/img_pub.hpp" #include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" #include "depthai_ros_driver/param_handlers/sensor_param_handler.hpp" #include "depthai_ros_driver/utils.hpp" -#include "image_transport/camera_publisher.hpp" -#include "image_transport/image_transport.hpp" #include "rclcpp/node.hpp" namespace depthai_ros_driver { namespace dai_nodes { Mono::Mono(const std::string& daiNodeName, - rclcpp::Node* node, + std::shared_ptr node, std::shared_ptr pipeline, dai::CameraBoardSocket socket, dai_nodes::sensor_helpers::ImageSensor sensor, bool publish = true) : BaseNode(daiNodeName, node, pipeline) { - RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Creating node %s", daiNodeName.c_str()); setNames(); monoCamNode = pipeline->create(); ph = std::make_unique(node, daiNodeName, socket); ph->declareParams(monoCamNode, sensor, publish); setXinXout(pipeline); - RCLCPP_DEBUG(node->get_logger(), "Node %s created", daiNodeName.c_str()); + 
RCLCPP_DEBUG(getLogger(), "Node %s created", daiNodeName.c_str()); } Mono::~Mono() = default; void Mono::setNames() { @@ -41,15 +35,15 @@ void Mono::setNames() { void Mono::setXinXout(std::shared_ptr pipeline) { if(ph->getParam("i_publish_topic")) { - xoutMono = pipeline->create(); - xoutMono->setStreamName(monoQName); - if(ph->getParam("i_low_bandwidth")) { - videoEnc = sensor_helpers::createEncoder(pipeline, ph->getParam("i_low_bandwidth_quality")); - monoCamNode->out.link(videoEnc->input); - videoEnc->bitstream.link(xoutMono->input); - } else { - monoCamNode->out.link(xoutMono->input); - } + utils::VideoEncoderConfig encConfig; + encConfig.profile = static_cast(ph->getParam("i_low_bandwidth_profile")); + encConfig.bitrate = ph->getParam("i_low_bandwidth_bitrate"); + encConfig.frameFreq = ph->getParam("i_low_bandwidth_frame_freq"); + encConfig.quality = ph->getParam("i_low_bandwidth_quality"); + encConfig.enabled = ph->getParam("i_low_bandwidth"); + + imagePublisher = setupOutput( + pipeline, monoQName, [&](auto input) { monoCamNode->out.link(input); }, ph->getParam("i_synced"), encConfig); } xinControl = pipeline->create(); xinControl->setStreamName(controlQName); @@ -58,62 +52,36 @@ void Mono::setXinXout(std::shared_ptr pipeline) { void Mono::setupQueues(std::shared_ptr device) { if(ph->getParam("i_publish_topic")) { - auto tfPrefix = getTFPrefix(utils::getSocketName(static_cast(ph->getParam("i_board_socket_id")))); - imageConverter = - std::make_unique(tfPrefix + "_camera_optical_frame", false, ph->getParam("i_get_base_device_timestamp")); - imageConverter->setUpdateRosBaseTimeOnToRosMsg(ph->getParam("i_update_ros_base_time_on_ros_msg")); - if(ph->getParam("i_low_bandwidth")) { - imageConverter->convertFromBitstream(dai::RawImgFrame::Type::GRAY8); - } - if(ph->getParam("i_add_exposure_offset")) { - auto offset = static_cast(ph->getParam("i_exposure_offset")); - imageConverter->addExposureOffset(offset); - } - if(ph->getParam("i_reverse_stereo_socket_order")) { - imageConverter->reverseStereoSocketOrder(); - } - infoManager = std::make_shared( - getROSNode()->create_sub_node(std::string(getROSNode()->get_name()) + "/" + getName()).get(), "/" + getName()); - if(ph->getParam("i_calibration_file").empty()) { - infoManager->setCameraInfo(sensor_helpers::getCalibInfo(getROSNode()->get_logger(), - *imageConverter, - device, - static_cast(ph->getParam("i_board_socket_id")), - ph->getParam("i_width"), - ph->getParam("i_height"))); - } else { - infoManager->loadCameraInfo(ph->getParam("i_calibration_file")); - } - monoQ = device->getOutputQueue(monoQName, ph->getParam("i_max_q_size"), false); - if(ipcEnabled()) { - RCLCPP_DEBUG(getROSNode()->get_logger(), "Enabling intra_process communication!"); - monoPub = getROSNode()->create_publisher("~/" + getName() + "/image_raw", 10); - infoPub = getROSNode()->create_publisher("~/" + getName() + "/camera_info", 10); - monoQ->addCallback(std::bind(sensor_helpers::splitPub, - std::placeholders::_1, - std::placeholders::_2, - *imageConverter, - monoPub, - infoPub, - infoManager, - ph->getParam("i_enable_lazy_publisher"))); + auto tfPrefix = getOpticalTFPrefix(getSocketName(static_cast(ph->getParam("i_board_socket_id")))); + utils::ImgConverterConfig convConf; + convConf.tfPrefix = tfPrefix; + convConf.getBaseDeviceTimestamp = ph->getParam("i_get_base_device_timestamp"); + convConf.updateROSBaseTimeOnRosMsg = ph->getParam("i_update_ros_base_time_on_ros_msg"); + convConf.lowBandwidth = ph->getParam("i_low_bandwidth"); + convConf.encoding = 
dai::RawImgFrame::Type::GRAY8; + convConf.addExposureOffset = ph->getParam("i_add_exposure_offset"); + convConf.expOffset = static_cast(ph->getParam("i_exposure_offset")); + convConf.reverseSocketOrder = ph->getParam("i_reverse_stereo_socket_order"); + + utils::ImgPublisherConfig pubConf; + pubConf.daiNodeName = getName(); + pubConf.topicName = "~/" + getName(); + pubConf.lazyPub = ph->getParam("i_enable_lazy_publisher"); + pubConf.socket = static_cast(ph->getParam("i_board_socket_id")); + pubConf.calibrationFile = ph->getParam("i_calibration_file"); + pubConf.rectified = false; + pubConf.width = ph->getParam("i_width"); + pubConf.height = ph->getParam("i_height"); + pubConf.maxQSize = ph->getParam("i_max_q_size"); + pubConf.publishCompressed = ph->getParam("i_publish_compressed"); - } else { - monoPubIT = image_transport::create_camera_publisher(getROSNode(), "~/" + getName() + "/image_raw"); - monoQ->addCallback(std::bind(sensor_helpers::cameraPub, - std::placeholders::_1, - std::placeholders::_2, - *imageConverter, - monoPubIT, - infoManager, - ph->getParam("i_enable_lazy_publisher"))); - } + imagePublisher->setup(device, convConf, pubConf); } controlQ = device->getInputQueue(controlQName); } void Mono::closeQueues() { if(ph->getParam("i_publish_topic")) { - monoQ->close(); + imagePublisher->closeQueue(); } controlQ->close(); } @@ -122,6 +90,14 @@ void Mono::link(dai::Node::Input in, int /*linkType*/) { monoCamNode->out.link(in); } +std::vector> Mono::getPublishers() { + std::vector> publishers; + if(ph->getParam("i_publish_topic") && ph->getParam("i_synced")) { + publishers.push_back(imagePublisher); + } + return publishers; +} + void Mono::updateParams(const std::vector& params) { auto ctrl = ph->setRuntimeParams(params); controlQ->send(ctrl); diff --git a/depthai_ros_driver/src/dai_nodes/sensors/rgb.cpp b/depthai_ros_driver/src/dai_nodes/sensors/rgb.cpp index 7286d19d..2ce908b3 100644 --- a/depthai_ros_driver/src/dai_nodes/sensors/rgb.cpp +++ b/depthai_ros_driver/src/dai_nodes/sensors/rgb.cpp @@ -1,7 +1,6 @@ #include "depthai_ros_driver/dai_nodes/sensors/rgb.hpp" #include "camera_info_manager/camera_info_manager.hpp" -#include "depthai/device/DataQueue.hpp" #include "depthai/device/Device.hpp" #include "depthai/pipeline/Pipeline.hpp" #include "depthai/pipeline/node/ColorCamera.hpp" @@ -9,29 +8,27 @@ #include "depthai/pipeline/node/XLinkIn.hpp" #include "depthai/pipeline/node/XLinkOut.hpp" #include "depthai_bridge/ImageConverter.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/img_pub.hpp" #include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" #include "depthai_ros_driver/param_handlers/sensor_param_handler.hpp" -#include "depthai_ros_driver/utils.hpp" -#include "image_transport/camera_publisher.hpp" -#include "image_transport/image_transport.hpp" #include "rclcpp/node.hpp" namespace depthai_ros_driver { namespace dai_nodes { RGB::RGB(const std::string& daiNodeName, - rclcpp::Node* node, + std::shared_ptr node, std::shared_ptr pipeline, dai::CameraBoardSocket socket = dai::CameraBoardSocket::CAM_A, sensor_helpers::ImageSensor sensor = {"IMX378", "4k", {"12mp", "4k"}, true}, bool publish = true) : BaseNode(daiNodeName, node, pipeline) { - RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Creating node %s", daiNodeName.c_str()); setNames(); colorCamNode = pipeline->create(); ph = std::make_unique(node, daiNodeName, socket); ph->declareParams(colorCamNode, sensor, publish); setXinXout(pipeline); - 
RCLCPP_DEBUG(node->get_logger(), "Node %s created", daiNodeName.c_str()); + RCLCPP_DEBUG(getLogger(), "Node %s created", daiNodeName.c_str()); } RGB::~RGB() = default; void RGB::setNames() { @@ -41,26 +38,26 @@ void RGB::setNames() { } void RGB::setXinXout(std::shared_ptr pipeline) { + bool outputIsp = ph->getParam("i_output_isp"); + bool lowBandwidth = ph->getParam("i_low_bandwidth"); + std::function rgbLinkChoice; + if(outputIsp && !lowBandwidth) { + rgbLinkChoice = [&](auto input) { colorCamNode->isp.link(input); }; + } else { + rgbLinkChoice = [&](auto input) { colorCamNode->video.link(input); }; + } if(ph->getParam("i_publish_topic")) { - xoutColor = pipeline->create(); - xoutColor->setStreamName(ispQName); - if(ph->getParam("i_low_bandwidth")) { - videoEnc = sensor_helpers::createEncoder(pipeline, ph->getParam("i_low_bandwidth_quality")); - colorCamNode->video.link(videoEnc->input); - videoEnc->bitstream.link(xoutColor->input); - } else { - if(ph->getParam("i_output_isp")) - colorCamNode->isp.link(xoutColor->input); - else - colorCamNode->video.link(xoutColor->input); - } + utils::VideoEncoderConfig encConfig; + encConfig.profile = static_cast(ph->getParam("i_low_bandwidth_profile")); + encConfig.bitrate = ph->getParam("i_low_bandwidth_bitrate"); + encConfig.frameFreq = ph->getParam("i_low_bandwidth_frame_freq"); + encConfig.quality = ph->getParam("i_low_bandwidth_quality"); + encConfig.enabled = lowBandwidth; + + rgbPub = setupOutput(pipeline, ispQName, rgbLinkChoice, ph->getParam("i_synced"), encConfig); } if(ph->getParam("i_enable_preview")) { - xoutPreview = pipeline->create(); - xoutPreview->setStreamName(previewQName); - xoutPreview->input.setQueueSize(2); - xoutPreview->input.setBlocking(false); - colorCamNode->preview.link(xoutPreview->input); + previewPub = setupOutput(pipeline, previewQName, [&](auto input) { colorCamNode->preview.link(input); }); } xinControl = pipeline->create(); xinControl->setStreamName(controlQName); @@ -69,101 +66,60 @@ void RGB::setXinXout(std::shared_ptr pipeline) { void RGB::setupQueues(std::shared_ptr device) { if(ph->getParam("i_publish_topic")) { - auto tfPrefix = getTFPrefix(utils::getSocketName(static_cast(ph->getParam("i_board_socket_id")))); - infoManager = std::make_shared( - getROSNode()->create_sub_node(std::string(getROSNode()->get_name()) + "/" + getName()).get(), "/" + getName()); - imageConverter = - std::make_unique(tfPrefix + "_camera_optical_frame", false, ph->getParam("i_get_base_device_timestamp")); - imageConverter->setUpdateRosBaseTimeOnToRosMsg(ph->getParam("i_update_ros_base_time_on_ros_msg")); - if(ph->getParam("i_low_bandwidth")) { - imageConverter->convertFromBitstream(dai::RawImgFrame::Type::BGR888i); - } - if(ph->getParam("i_add_exposure_offset")) { - auto offset = static_cast(ph->getParam("i_exposure_offset")); - imageConverter->addExposureOffset(offset); - } - - if(ph->getParam("i_reverse_stereo_socket_order")) { - imageConverter->reverseStereoSocketOrder(); - } + auto tfPrefix = getOpticalTFPrefix(getSocketName(static_cast(ph->getParam("i_board_socket_id")))); + utils::ImgConverterConfig convConfig; + convConfig.tfPrefix = tfPrefix; + convConfig.getBaseDeviceTimestamp = ph->getParam("i_get_base_device_timestamp"); + convConfig.updateROSBaseTimeOnRosMsg = ph->getParam("i_update_ros_base_time_on_ros_msg"); + convConfig.lowBandwidth = ph->getParam("i_low_bandwidth"); + convConfig.encoding = dai::RawImgFrame::Type::BGR888i; + convConfig.addExposureOffset = ph->getParam("i_add_exposure_offset"); + convConfig.expOffset = 
static_cast(ph->getParam("i_exposure_offset")); + convConfig.reverseSocketOrder = ph->getParam("i_reverse_stereo_socket_order"); - if(ph->getParam("i_calibration_file").empty()) { - infoManager->setCameraInfo(sensor_helpers::getCalibInfo(getROSNode()->get_logger(), - *imageConverter, - device, - static_cast(ph->getParam("i_board_socket_id")), - ph->getParam("i_width"), - ph->getParam("i_height"))); - } else { - infoManager->loadCameraInfo(ph->getParam("i_calibration_file")); - } - colorQ = device->getOutputQueue(ispQName, ph->getParam("i_max_q_size"), false); - if(ipcEnabled()) { - rgbPub = getROSNode()->create_publisher("~/" + getName() + "/image_raw", 10); - rgbInfoPub = getROSNode()->create_publisher("~/" + getName() + "/camera_info", 10); - colorQ->addCallback(std::bind(sensor_helpers::splitPub, - std::placeholders::_1, - std::placeholders::_2, - *imageConverter, - rgbPub, - rgbInfoPub, - infoManager, - ph->getParam("i_enable_lazy_publisher"))); + utils::ImgPublisherConfig pubConfig; + pubConfig.daiNodeName = getName(); + pubConfig.topicName = "~/" + getName(); + pubConfig.lazyPub = ph->getParam("i_enable_lazy_publisher"); + pubConfig.socket = static_cast(ph->getParam("i_board_socket_id")); + pubConfig.calibrationFile = ph->getParam("i_calibration_file"); + pubConfig.rectified = false; + pubConfig.width = ph->getParam("i_width"); + pubConfig.height = ph->getParam("i_height"); + pubConfig.maxQSize = ph->getParam("i_max_q_size"); + pubConfig.publishCompressed = ph->getParam("i_publish_compressed"); - } else { - rgbPubIT = image_transport::create_camera_publisher(getROSNode(), "~/" + getName() + "/image_raw"); - colorQ->addCallback(std::bind(sensor_helpers::cameraPub, - std::placeholders::_1, - std::placeholders::_2, - *imageConverter, - rgbPubIT, - infoManager, - ph->getParam("i_enable_lazy_publisher"))); - } + rgbPub->setup(device, convConfig, pubConfig); } if(ph->getParam("i_enable_preview")) { - previewQ = device->getOutputQueue(previewQName, ph->getParam("i_max_q_size"), false); + auto tfPrefix = getOpticalTFPrefix(getSocketName(static_cast(ph->getParam("i_board_socket_id")))); + utils::ImgConverterConfig convConfig; + convConfig.tfPrefix = tfPrefix; + convConfig.getBaseDeviceTimestamp = ph->getParam("i_get_base_device_timestamp"); + convConfig.updateROSBaseTimeOnRosMsg = ph->getParam("i_update_ros_base_time_on_ros_msg"); - previewInfoManager = std::make_shared( - getROSNode()->create_sub_node(std::string(getROSNode()->get_name()) + "/" + previewQName).get(), previewQName); - auto tfPrefix = getTFPrefix(utils::getSocketName(static_cast(ph->getParam("i_board_socket_id")))); - imageConverter = std::make_unique(tfPrefix + "_camera_optical_frame", false); - imageConverter->setUpdateRosBaseTimeOnToRosMsg(ph->getParam("i_update_ros_base_time_on_ros_msg")); - if(ph->getParam("i_calibration_file").empty()) { - previewInfoManager->setCameraInfo(sensor_helpers::getCalibInfo(getROSNode()->get_logger(), - *imageConverter, - device, - static_cast(ph->getParam("i_board_socket_id")), - ph->getParam("i_preview_size"), - ph->getParam("i_preview_size"))); - } else { - previewInfoManager->loadCameraInfo(ph->getParam("i_calibration_file")); - } - if(ipcEnabled()) { - previewPubIT = image_transport::create_camera_publisher(getROSNode(), "~/" + getName() + "/preview/image_raw"); - previewQ->addCallback( - std::bind(sensor_helpers::basicCameraPub, std::placeholders::_1, std::placeholders::_2, *imageConverter, previewPubIT, previewInfoManager)); - } else { - previewPub = getROSNode()->create_publisher("~/" 
+ getName() + "/preview/image_raw", 10); - previewInfoPub = getROSNode()->create_publisher("~/" + getName() + "/preview/camera_info", 10); - previewQ->addCallback(std::bind(sensor_helpers::splitPub, - std::placeholders::_1, - std::placeholders::_2, - *imageConverter, - previewPub, - previewInfoPub, - previewInfoManager, - ph->getParam("i_enable_lazy_publisher"))); - } + utils::ImgPublisherConfig pubConfig; + pubConfig.daiNodeName = getName(); + pubConfig.topicName = "~/" + getName(); + pubConfig.lazyPub = ph->getParam("i_enable_lazy_publisher"); + pubConfig.socket = static_cast(ph->getParam("i_board_socket_id")); + pubConfig.calibrationFile = ph->getParam("i_calibration_file"); + pubConfig.rectified = false; + pubConfig.width = ph->getParam("i_preview_width"); + pubConfig.height = ph->getParam("i_preview_height"); + pubConfig.maxQSize = ph->getParam("i_max_q_size"); + pubConfig.topicSuffix = "/preview/image_raw"; + + previewPub->setup(device, convConfig, pubConfig); }; controlQ = device->getInputQueue(controlQName); } void RGB::closeQueues() { if(ph->getParam("i_publish_topic")) { - colorQ->close(); + rgbPub->closeQueue(); if(ph->getParam("i_enable_preview")) { - previewQ->close(); + previewPub->closeQueue(); } } controlQ->close(); @@ -181,6 +137,14 @@ void RGB::link(dai::Node::Input in, int linkType) { } } +std::vector> RGB::getPublishers() { + std::vector> publishers; + if(ph->getParam("i_synced")) { + publishers.push_back(rgbPub); + } + return publishers; +} + void RGB::updateParams(const std::vector& params) { auto ctrl = ph->setRuntimeParams(params); controlQ->send(ctrl); diff --git a/depthai_ros_driver/src/dai_nodes/sensors/sensor_helpers.cpp b/depthai_ros_driver/src/dai_nodes/sensors/sensor_helpers.cpp index 59e9f8b0..82a976e1 100644 --- a/depthai_ros_driver/src/dai_nodes/sensors/sensor_helpers.cpp +++ b/depthai_ros_driver/src/dai_nodes/sensors/sensor_helpers.cpp @@ -1,9 +1,12 @@ #include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" +#include + #include "camera_info_manager/camera_info_manager.hpp" #include "depthai/pipeline/Pipeline.hpp" #include "depthai/pipeline/node/VideoEncoder.hpp" #include "depthai_bridge/ImageConverter.hpp" +#include "image_transport/image_transport.hpp" #include "rclcpp/logger.hpp" namespace depthai_ros_driver { @@ -31,6 +34,48 @@ const std::unordered_map socketNameMap = { {dai::CameraBoardSocket::CAM_D, "left_back"}, {dai::CameraBoardSocket::CAM_E, "right_back"}, }; +const std::unordered_map rsSocketNameMap = { + {dai::CameraBoardSocket::AUTO, "color"}, + {dai::CameraBoardSocket::CAM_A, "color"}, + {dai::CameraBoardSocket::CAM_B, "infra2"}, + {dai::CameraBoardSocket::CAM_C, "infra1"}, + {dai::CameraBoardSocket::CAM_D, "infra4"}, + {dai::CameraBoardSocket::CAM_E, "infra3"}, +}; +const std::unordered_map rsNodeNameMap = { + {NodeNameEnum::RGB, "color"}, + {NodeNameEnum::Left, "infra2"}, + {NodeNameEnum::Right, "infra1"}, + {NodeNameEnum::Stereo, "depth"}, + {NodeNameEnum::IMU, "imu"}, + {NodeNameEnum::NN, "nn"}, +}; + +const std::unordered_map NodeNameMap = { + {NodeNameEnum::RGB, "rgb"}, + {NodeNameEnum::Left, "left"}, + {NodeNameEnum::Right, "right"}, + {NodeNameEnum::Stereo, "stereo"}, + {NodeNameEnum::IMU, "imu"}, + {NodeNameEnum::NN, "nn"}, +}; + +bool rsCompabilityMode(std::shared_ptr node) { + return node->get_parameter("camera.i_rs_compat").as_bool(); +} +std::string getNodeName(std::shared_ptr node, NodeNameEnum name) { + if(rsCompabilityMode(node)) { + return rsNodeNameMap.at(name); + } + return NodeNameMap.at(name); +} + +std::string 
getSocketName(std::shared_ptr node, dai::CameraBoardSocket socket) { + if(rsCompabilityMode(node)) { + return rsSocketNameMap.at(socket); + } + return socketNameMap.at(socket); +} const std::unordered_map monoResolutionMap = { {"400P", dai::MonoCameraProperties::SensorResolution::THE_400_P}, {"480P", dai::MonoCameraProperties::SensorResolution::THE_480_P}, @@ -80,42 +125,8 @@ void basicCameraPub(const std::string& /*name*/, } } -void cameraPub(const std::string& /*name*/, - const std::shared_ptr& data, - dai::ros::ImageConverter& converter, - image_transport::CameraPublisher& pub, - std::shared_ptr infoManager, - bool lazyPub) { - if(rclcpp::ok() && (!lazyPub || pub.getNumSubscribers() > 0)) { - auto img = std::dynamic_pointer_cast(data); - auto info = infoManager->getCameraInfo(); - auto rawMsg = converter.toRosMsgRawPtr(img, info); - info.header = rawMsg.header; - pub.publish(rawMsg, info); - } -} - -void splitPub(const std::string& /*name*/, - const std::shared_ptr& data, - dai::ros::ImageConverter& converter, - rclcpp::Publisher::SharedPtr imgPub, - rclcpp::Publisher::SharedPtr infoPub, - std::shared_ptr infoManager, - bool lazyPub) { - if(rclcpp::ok() && (!lazyPub || detectSubscription(imgPub, infoPub))) { - auto img = std::dynamic_pointer_cast(data); - auto info = infoManager->getCameraInfo(); - auto rawMsg = converter.toRosMsgRawPtr(img, info); - info.header = rawMsg.header; - sensor_msgs::msg::CameraInfo::UniquePtr infoMsg = std::make_unique(info); - sensor_msgs::msg::Image::UniquePtr msg = std::make_unique(rawMsg); - imgPub->publish(std::move(msg)); - infoPub->publish(std::move(infoMsg)); - } -} - sensor_msgs::msg::CameraInfo getCalibInfo(const rclcpp::Logger& logger, - dai::ros::ImageConverter& converter, + std::shared_ptr converter, std::shared_ptr device, dai::CameraBoardSocket socket, int width, @@ -123,18 +134,12 @@ sensor_msgs::msg::CameraInfo getCalibInfo(const rclcpp::Logger& logger, sensor_msgs::msg::CameraInfo info; auto calibHandler = device->readCalibration(); try { - info = converter.calibrationToCameraInfo(calibHandler, socket, width, height); + info = converter->calibrationToCameraInfo(calibHandler, socket, width, height); } catch(std::runtime_error& e) { RCLCPP_ERROR(logger, "No calibration for socket %d! 
Publishing empty camera_info.", static_cast(socket)); } return info; } -std::shared_ptr createEncoder(std::shared_ptr pipeline, int quality, dai::VideoEncoderProperties::Profile profile) { - auto enc = pipeline->create(); - enc->setQuality(quality); - enc->setProfile(profile); - return enc; -} bool detectSubscription(const rclcpp::Publisher::SharedPtr& pub, const rclcpp::Publisher::SharedPtr& infoPub) { @@ -143,4 +148,4 @@ bool detectSubscription(const rclcpp::Publisher::Shared } } // namespace sensor_helpers } // namespace dai_nodes -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/src/dai_nodes/sensors/sensor_wrapper.cpp b/depthai_ros_driver/src/dai_nodes/sensors/sensor_wrapper.cpp index da0bd819..6693d579 100644 --- a/depthai_ros_driver/src/dai_nodes/sensors/sensor_wrapper.cpp +++ b/depthai_ros_driver/src/dai_nodes/sensors/sensor_wrapper.cpp @@ -15,7 +15,7 @@ namespace depthai_ros_driver { namespace dai_nodes { SensorWrapper::SensorWrapper(const std::string& daiNodeName, - rclcpp::Node* node, + std::shared_ptr node, std::shared_ptr pipeline, std::shared_ptr device, dai::CameraBoardSocket socket, @@ -54,7 +54,7 @@ SensorWrapper::SensorWrapper(const std::string& daiNodeName, } RCLCPP_DEBUG(node->get_logger(), "Node %s has sensor %s", daiNodeName.c_str(), sensorName.c_str()); sensorData = *sensorIt; - if(device->getDeviceName() == "OAK-D-SR") { + if(device->getDeviceName() == "OAK-D-SR" || device->getDeviceName() == "OAK-D-SR-POE") { (*sensorIt).color = true; // ov9282 is color sensor in this case } if((*sensorIt).color) { @@ -131,6 +131,13 @@ void SensorWrapper::link(dai::Node::Input in, int linkType) { } } +std::vector> SensorWrapper::getPublishers() { + if(ph->getParam("i_disable_node")) { + return std::vector>(); + } + return sensorNode->getPublishers(); +} + void SensorWrapper::updateParams(const std::vector& params) { sensorNode->updateParams(params); } diff --git a/depthai_ros_driver/src/dai_nodes/sensors/stereo.cpp b/depthai_ros_driver/src/dai_nodes/sensors/stereo.cpp new file mode 100644 index 00000000..1775f9f6 --- /dev/null +++ b/depthai_ros_driver/src/dai_nodes/sensors/stereo.cpp @@ -0,0 +1,331 @@ +#include "depthai_ros_driver/dai_nodes/sensors/stereo.hpp" + +#include "depthai/device/DeviceBase.hpp" +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/datatype/ADatatype.hpp" +#include "depthai/pipeline/datatype/ImgFrame.hpp" +#include "depthai/pipeline/node/StereoDepth.hpp" +#include "depthai_ros_driver/dai_nodes/nn/nn_helpers.hpp" +#include "depthai_ros_driver/dai_nodes/nn/spatial_nn_wrapper.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/feature_tracker.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/img_pub.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/sensor_wrapper.hpp" +#include "depthai_ros_driver/param_handlers/stereo_param_handler.hpp" +#include "depthai_ros_driver/utils.hpp" +#include "rclcpp/node.hpp" + +namespace depthai_ros_driver { +namespace dai_nodes { +Stereo::Stereo(const std::string& daiNodeName, + std::shared_ptr node, + std::shared_ptr pipeline, + std::shared_ptr device, + dai::CameraBoardSocket leftSocket, + dai::CameraBoardSocket rightSocket) + : BaseNode(daiNodeName, node, pipeline) { + RCLCPP_DEBUG(getLogger(), "Creating node %s", daiNodeName.c_str()); + setNames(); + ph = std::make_unique(node, daiNodeName); + auto alignSocket = dai::CameraBoardSocket::CAM_A; + 
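+ // OAK-D-SR and OAK-D-SR-POE have no sensor on CAM_A, so depth alignment falls back to the CAM_C socket, as checked next.
+ 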
if(device->getDeviceName() == "OAK-D-SR" || device->getDeviceName() == "OAK-D-SR-POE") { + alignSocket = dai::CameraBoardSocket::CAM_C; + } + ph->updateSocketsFromParams(leftSocket, rightSocket, alignSocket); + auto features = device->getConnectedCameraFeatures(); + for(auto f : features) { + if(f.socket == leftSocket) { + leftSensInfo = f; + leftSensInfo.name = getSocketName(leftSocket); + } else if(f.socket == rightSocket) { + rightSensInfo = f; + rightSensInfo.name = getSocketName(rightSocket); + } else { + continue; + } + } + RCLCPP_DEBUG(getLogger(), + "Creating stereo node with left sensor %s and right sensor %s", + getSocketName(leftSensInfo.socket).c_str(), + getSocketName(rightSensInfo.socket).c_str()); + left = std::make_unique(getSocketName(leftSensInfo.socket), node, pipeline, device, leftSensInfo.socket, false); + right = std::make_unique(getSocketName(rightSensInfo.socket), node, pipeline, device, rightSensInfo.socket, false); + stereoCamNode = pipeline->create(); + ph->declareParams(stereoCamNode); + setXinXout(pipeline); + left->link(stereoCamNode->left); + right->link(stereoCamNode->right); + + if(ph->getParam("i_enable_spatial_nn")) { + if(ph->getParam("i_spatial_nn_source") == "left") { + nnNode = std::make_unique(getName() + "_spatial_nn", getROSNode(), pipeline, leftSensInfo.socket); + left->link(nnNode->getInput(static_cast(dai_nodes::nn_helpers::link_types::SpatialNNLinkType::input)), + static_cast(dai_nodes::link_types::RGBLinkType::preview)); + } else { + nnNode = std::make_unique(getName() + "_spatial_nn", getROSNode(), pipeline, rightSensInfo.socket); + right->link(nnNode->getInput(static_cast(dai_nodes::nn_helpers::link_types::SpatialNNLinkType::input)), + static_cast(dai_nodes::link_types::RGBLinkType::preview)); + } + stereoCamNode->depth.link(nnNode->getInput(static_cast(dai_nodes::nn_helpers::link_types::SpatialNNLinkType::inputDepth))); + } + + RCLCPP_DEBUG(getLogger(), "Node %s created", daiNodeName.c_str()); +} +Stereo::~Stereo() = default; +void Stereo::setNames() { + stereoQName = getName() + "_stereo"; + leftRectQName = getName() + "_left_rect"; + rightRectQName = getName() + "_right_rect"; +} + +void Stereo::setXinXout(std::shared_ptr pipeline) { + bool outputDisparity = ph->getParam("i_output_disparity"); + std::function stereoLinkChoice; + if(outputDisparity) { + stereoLinkChoice = [&](auto input) { stereoCamNode->disparity.link(input); }; + } else { + stereoLinkChoice = [&](auto input) { stereoCamNode->depth.link(input); }; + } + if(ph->getParam("i_publish_topic")) { + utils::VideoEncoderConfig encConf; + encConf.profile = static_cast(ph->getParam("i_low_bandwidth_profile")); + encConf.bitrate = ph->getParam("i_low_bandwidth_bitrate"); + encConf.frameFreq = ph->getParam("i_low_bandwidth_frame_freq"); + encConf.quality = ph->getParam("i_low_bandwidth_quality"); + encConf.enabled = ph->getParam("i_low_bandwidth"); + + stereoPub = setupOutput(pipeline, stereoQName, stereoLinkChoice, ph->getParam("i_synced"), encConf); + } + + if(ph->getParam("i_left_rect_publish_topic") || ph->getParam("i_publish_synced_rect_pair")) { + utils::VideoEncoderConfig encConf; + encConf.profile = static_cast(ph->getParam("i_left_rect_low_bandwidth_profile")); + encConf.bitrate = ph->getParam("i_left_rect_low_bandwidth_bitrate"); + encConf.frameFreq = ph->getParam("i_left_rect_low_bandwidth_frame_freq"); + encConf.quality = ph->getParam("i_left_rect_low_bandwidth_quality"); + encConf.enabled = ph->getParam("i_left_rect_low_bandwidth"); + + leftRectPub = setupOutput( + 
pipeline, leftRectQName, [&](auto input) { stereoCamNode->rectifiedLeft.link(input); }, ph->getParam("i_left_rect_synced"), encConf); + } + + if(ph->getParam("i_right_rect_publish_topic") || ph->getParam("i_publish_synced_rect_pair")) { + utils::VideoEncoderConfig encConf; + encConf.profile = static_cast(ph->getParam("i_right_rect_low_bandwidth_profile")); + encConf.bitrate = ph->getParam("i_right_rect_low_bandwidth_bitrate"); + encConf.frameFreq = ph->getParam("i_right_rect_low_bandwidth_frame_freq"); + encConf.quality = ph->getParam("i_right_rect_low_bandwidth_quality"); + encConf.enabled = ph->getParam("i_right_rect_low_bandwidth"); + rightRectPub = setupOutput( + pipeline, rightRectQName, [&](auto input) { stereoCamNode->rectifiedRight.link(input); }, ph->getParam("i_right_rect_synced"), encConf); + } + + if(ph->getParam("i_left_rect_enable_feature_tracker")) { + featureTrackerLeftR = std::make_unique(leftSensInfo.name + std::string("_rect_feature_tracker"), getROSNode(), pipeline); + + stereoCamNode->rectifiedLeft.link(featureTrackerLeftR->getInput()); + } + + if(ph->getParam("i_right_rect_enable_feature_tracker")) { + featureTrackerRightR = std::make_unique(rightSensInfo.name + std::string("_rect_feature_tracker"), getROSNode(), pipeline); + stereoCamNode->rectifiedRight.link(featureTrackerRightR->getInput()); + } +} + +void Stereo::setupRectQueue(std::shared_ptr device, + dai::CameraFeatures& sensorInfo, + std::shared_ptr pub, + bool isLeft) { + auto sensorName = getSocketName(sensorInfo.socket); + auto tfPrefix = getOpticalTFPrefix(sensorName); + utils::ImgConverterConfig convConfig; + convConfig.tfPrefix = tfPrefix; + convConfig.interleaved = false; + convConfig.getBaseDeviceTimestamp = ph->getParam("i_get_base_device_timestamp"); + convConfig.updateROSBaseTimeOnRosMsg = ph->getParam("i_update_ros_base_time_on_ros_msg"); + convConfig.lowBandwidth = ph->getParam(isLeft ? "i_left_rect_low_bandwidth" : "i_right_rect_low_bandwidth"); + convConfig.encoding = dai::RawImgFrame::Type::GRAY8; + convConfig.addExposureOffset = ph->getParam(isLeft ? "i_left_rect_add_exposure_offset" : "i_right_rect_add_exposure_offset"); + convConfig.expOffset = static_cast(ph->getParam(isLeft ? "i_left_rect_exposure_offset" : "i_right_rect_exposure_offset")); + convConfig.reverseSocketOrder = ph->getParam("i_reverse_stereo_socket_order"); + + utils::ImgPublisherConfig pubConfig; + pubConfig.daiNodeName = sensorName; + pubConfig.rectified = true; + pubConfig.width = ph->getOtherNodeParam(sensorName, "i_width"); + pubConfig.height = ph->getOtherNodeParam(sensorName, "i_height"); + pubConfig.topicName = "~/" + sensorName; + pubConfig.topicSuffix = rsCompabilityMode() ? "/image_rect_raw" : "/image_raw"; + pubConfig.maxQSize = ph->getOtherNodeParam(sensorName, "i_max_q_size"); + pubConfig.socket = sensorInfo.socket; + pubConfig.infoMgrSuffix = "rect"; + pubConfig.publishCompressed = ph->getParam(isLeft ? 
"i_left_rect_publish_compressed" : "i_right_rect_publish_compressed"); + + pub->setup(device, convConfig, pubConfig); +} + +void Stereo::setupLeftRectQueue(std::shared_ptr device) { + setupRectQueue(device, leftSensInfo, leftRectPub, true); +} + +void Stereo::setupRightRectQueue(std::shared_ptr device) { + setupRectQueue(device, rightSensInfo, rightRectPub, false); +} + +void Stereo::setupStereoQueue(std::shared_ptr device) { + std::string tfPrefix; + if(ph->getParam("i_align_depth")) { + tfPrefix = getOpticalTFPrefix(ph->getParam("i_socket_name")); + } else { + tfPrefix = getOpticalTFPrefix(getSocketName(rightSensInfo.socket).c_str()); + } + utils::ImgConverterConfig convConfig; + convConfig.getBaseDeviceTimestamp = ph->getParam("i_get_base_device_timestamp"); + convConfig.tfPrefix = tfPrefix; + convConfig.updateROSBaseTimeOnRosMsg = ph->getParam("i_update_ros_base_time_on_ros_msg"); + convConfig.lowBandwidth = ph->getParam("i_low_bandwidth"); + convConfig.encoding = dai::RawImgFrame::Type::RAW8; + convConfig.addExposureOffset = ph->getParam("i_add_exposure_offset"); + convConfig.expOffset = static_cast(ph->getParam("i_exposure_offset")); + convConfig.reverseSocketOrder = ph->getParam("i_reverse_stereo_socket_order"); + convConfig.alphaScalingEnabled = ph->getParam("i_enable_alpha_scaling"); + if(convConfig.alphaScalingEnabled) { + convConfig.alphaScaling = ph->getParam("i_alpha_scaling"); + } + convConfig.outputDisparity = ph->getParam("i_output_disparity"); + + utils::ImgPublisherConfig pubConf; + pubConf.daiNodeName = getName(); + pubConf.topicName = "~/" + getName(); + pubConf.topicSuffix = rsCompabilityMode() ? "/image_rect_raw" : "/image_raw"; + pubConf.rectified = !convConfig.alphaScalingEnabled; + pubConf.width = ph->getParam("i_width"); + pubConf.height = ph->getParam("i_height"); + pubConf.socket = static_cast(ph->getParam("i_board_socket_id")); + pubConf.calibrationFile = ph->getParam("i_calibration_file"); + pubConf.leftSocket = leftSensInfo.socket; + pubConf.rightSocket = rightSensInfo.socket; + pubConf.lazyPub = ph->getParam("i_enable_lazy_publisher"); + pubConf.maxQSize = ph->getParam("i_max_q_size"); + pubConf.publishCompressed = ph->getParam("i_publish_compressed"); + + stereoPub->setup(device, convConfig, pubConf); +} +void Stereo::syncTimerCB() { + auto left = leftRectQ->get(); + auto right = rightRectQ->get(); + if(left->getSequenceNum() != right->getSequenceNum()) { + RCLCPP_WARN(getROSNode()->get_logger(), "Left and right rectified frames are not synchronized!"); + } else { + leftRectPub->publish(left); + rightRectPub->publish(right); + } +} + +void Stereo::setupQueues(std::shared_ptr device) { + left->setupQueues(device); + right->setupQueues(device); + if(ph->getParam("i_publish_topic")) { + setupStereoQueue(device); + } + if(ph->getParam("i_left_rect_publish_topic") || ph->getParam("i_publish_synced_rect_pair")) { + setupLeftRectQueue(device); + } + if(ph->getParam("i_right_rect_publish_topic") || ph->getParam("i_publish_synced_rect_pair")) { + setupRightRectQueue(device); + } + if(ph->getParam("i_publish_synced_rect_pair")) { + int timerPeriod = 1000.0 / ph->getOtherNodeParam(leftSensInfo.name, "i_fps"); + RCLCPP_INFO(getROSNode()->get_logger(), "Setting up stereo pair sync timer with period %d ms based on left sensor FPS.", timerPeriod); + leftRectQ = leftRectPub->getQueue(); + rightRectQ = rightRectPub->getQueue(); + syncTimer = getROSNode()->create_wall_timer(std::chrono::milliseconds(timerPeriod), std::bind(&Stereo::syncTimerCB, this)); + } + 
if(ph->getParam("i_left_rect_enable_feature_tracker")) { + featureTrackerLeftR->setupQueues(device); + } + if(ph->getParam("i_right_rect_enable_feature_tracker")) { + featureTrackerRightR->setupQueues(device); + } + if(ph->getParam("i_enable_spatial_nn")) { + nnNode->setupQueues(device); + } +} +void Stereo::closeQueues() { + left->closeQueues(); + right->closeQueues(); + if(ph->getParam("i_publish_topic")) { + stereoPub->closeQueue(); + } + if(ph->getParam("i_left_rect_publish_topic")) { + leftRectPub->closeQueue(); + } + if(ph->getParam("i_right_rect_publish_topic")) { + rightRectPub->closeQueue(); + } + if(ph->getParam("i_publish_synced_rect_pair")) { + syncTimer->cancel(); + leftRectPub->closeQueue(); + rightRectPub->closeQueue(); + } + if(ph->getParam("i_left_rect_enable_feature_tracker")) { + featureTrackerLeftR->closeQueues(); + } + if(ph->getParam("i_right_rect_enable_feature_tracker")) { + featureTrackerRightR->closeQueues(); + } + if(ph->getParam("i_enable_spatial_nn")) { + nnNode->closeQueues(); + } +} + +void Stereo::link(dai::Node::Input in, int linkType) { + if(linkType == static_cast(link_types::StereoLinkType::stereo)) { + stereoCamNode->depth.link(in); + } else if(linkType == static_cast(link_types::StereoLinkType::left)) { + stereoCamNode->rectifiedLeft.link(in); + } else if(linkType == static_cast(link_types::StereoLinkType::right)) { + stereoCamNode->rectifiedRight.link(in); + } else { + throw std::runtime_error("Wrong link type specified!"); + } +} + +std::vector> Stereo::getPublishers() { + std::vector> pubs; + if(ph->getParam("i_publish_topic") && ph->getParam("i_synced")) { + pubs.push_back(stereoPub); + } + if(ph->getParam("i_left_rect_publish_topic") && ph->getParam("i_left_rect_synced")) { + pubs.push_back(leftRectPub); + } + if(ph->getParam("i_right_rect_publish_topic") && ph->getParam("i_right_rect_synced")) { + pubs.push_back(rightRectPub); + } + auto pubsLeft = left->getPublishers(); + if(!pubsLeft.empty()) { + pubs.insert(pubs.end(), pubsLeft.begin(), pubsLeft.end()); + } + auto pubsRight = right->getPublishers(); + if(!pubsRight.empty()) { + pubs.insert(pubs.end(), pubsRight.begin(), pubsRight.end()); + } + return pubs; +} + +dai::Node::Input Stereo::getInput(int linkType) { + if(linkType == static_cast(link_types::StereoLinkType::left)) { + return stereoCamNode->left; + } else if(linkType == static_cast(link_types::StereoLinkType::right)) { + return stereoCamNode->right; + } else { + throw std::runtime_error("Wrong link type specified!"); + } +} + +void Stereo::updateParams(const std::vector& params) { + ph->setRuntimeParams(params); +} + +} // namespace dai_nodes +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/src/dai_nodes/sensors/sync.cpp b/depthai_ros_driver/src/dai_nodes/sensors/sync.cpp new file mode 100644 index 00000000..d91030b3 --- /dev/null +++ b/depthai_ros_driver/src/dai_nodes/sensors/sync.cpp @@ -0,0 +1,82 @@ +#include "depthai_ros_driver/dai_nodes/sensors/sync.hpp" + +#include + +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/datatype/MessageGroup.hpp" +#include "depthai/pipeline/node/Sync.hpp" +#include "depthai/pipeline/node/XLinkOut.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/img_pub.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" +#include "depthai_ros_driver/param_handlers/sync_param_handler.hpp" + +namespace depthai_ros_driver { +namespace dai_nodes { + +Sync::Sync(const std::string& daiNodeName, std::shared_ptr node, std::shared_ptr pipeline) + : 
BaseNode(daiNodeName, node, pipeline) { + syncNode = pipeline->create(); + paramHandler = std::make_unique(node, daiNodeName); + paramHandler->declareParams(syncNode); + setNames(); + setXinXout(pipeline); +} + +Sync::~Sync() = default; + +void Sync::setNames() { + syncOutputName = getName() + "_out"; +} +void Sync::setXinXout(std::shared_ptr pipeline) { + xoutFrame = pipeline->create(); + xoutFrame->setStreamName(syncOutputName); + xoutFrame->input.setBlocking(false); + syncNode->out.link(xoutFrame->input); +} + +void Sync::setupQueues(std::shared_ptr device) { + outQueue = device->getOutputQueue(syncOutputName, 8, false); + outQueue->addCallback([this](const std::shared_ptr& in) { + auto group = std::dynamic_pointer_cast(in); + if(group) { + bool firstMsg = true; + rclcpp::Time timestamp; + for(auto& msg : *group) { + // find publisher by message namespace + for(auto& pub : publishers) { + if(pub->getQueueName() == msg.first) { + auto data = pub->convertData(msg.second); + if(firstMsg) { + timestamp = data->info->header.stamp; + firstMsg = false; + } + pub->publish(std::move(data), timestamp); + } + } + } + } + }); +} + +void Sync::link(dai::Node::Input in, int /* linkType */) { + syncNode->out.link(in); +} + +dai::Node::Input Sync::getInputByName(const std::string& name) { + syncNode->inputs[name].setBlocking(false); + return syncNode->inputs[name]; +} + +void Sync::closeQueues() { + outQueue->close(); +} + +void Sync::addPublishers(const std::vector>& pubs) { + for(auto& pub : pubs) { + pub->link(getInputByName(pub->getQueueName())); + } + publishers.insert(publishers.end(), pubs.begin(), pubs.end()); +} + +} // namespace dai_nodes +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/src/dai_nodes/sensors/tof.cpp b/depthai_ros_driver/src/dai_nodes/sensors/tof.cpp new file mode 100644 index 00000000..84457165 --- /dev/null +++ b/depthai_ros_driver/src/dai_nodes/sensors/tof.cpp @@ -0,0 +1,111 @@ +#include "depthai_ros_driver/dai_nodes/sensors/tof.hpp" + +#include "depthai/device/Device.hpp" +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/node/Camera.hpp" +#include "depthai/pipeline/node/ImageAlign.hpp" +#include "depthai/pipeline/node/ToF.hpp" +#include "depthai/pipeline/node/XLinkIn.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/img_pub.hpp" +#include "depthai_ros_driver/param_handlers/tof_param_handler.hpp" +#include "depthai_ros_driver/utils.hpp" +#include "rclcpp/node.hpp" + +namespace depthai_ros_driver { +namespace dai_nodes { +ToF::ToF(const std::string& daiNodeName, std::shared_ptr node, std::shared_ptr pipeline, dai::CameraBoardSocket socket) + : BaseNode(daiNodeName, node, pipeline) { + RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str()); + setNames(); + camNode = pipeline->create(); + tofNode = pipeline->create(); + boardSocket = socket; + ph = std::make_unique(node, daiNodeName); + ph->declareParams(camNode, tofNode); + setXinXout(pipeline); + RCLCPP_DEBUG(node->get_logger(), "Node %s created", daiNodeName.c_str()); +} +ToF::~ToF() = default; +void ToF::setNames() { + tofQName = getName() + "_tof"; +} + +void ToF::setXinXout(std::shared_ptr pipeline) { + if(ph->getParam("i_publish_topic")) { + camNode->raw.link(tofNode->input); + bool align = boardSocket == dai::CameraBoardSocket::CAM_A; + std::function tofLinkChoice; + if(align) { + tofLinkChoice = [&](auto input) { tofNode->depth.link(input); }; + } else { + alignNode = pipeline->create(); + tofNode->depth.link(alignNode->input); + tofLinkChoice = [&](auto 
input) { alignNode->outputAligned.link(input); }; + } + utils::VideoEncoderConfig encConfig; + encConfig.profile = static_cast(ph->getParam("i_low_bandwidth_profile")); + encConfig.bitrate = ph->getParam("i_low_bandwidth_bitrate"); + encConfig.frameFreq = ph->getParam("i_low_bandwidth_frame_freq"); + encConfig.quality = ph->getParam("i_low_bandwidth_quality"); + encConfig.enabled = ph->getParam("i_low_bandwidth"); + + tofPub = setupOutput(pipeline, tofQName, tofLinkChoice, ph->getParam("i_synced"), encConfig); + } +} + +void ToF::setupQueues(std::shared_ptr device) { + if(ph->getParam("i_publish_topic")) { + auto tfPrefix = getOpticalTFPrefix(getSocketName(boardSocket)); + + utils::ImgConverterConfig convConfig; + convConfig.tfPrefix = tfPrefix; + convConfig.getBaseDeviceTimestamp = ph->getParam("i_get_base_device_timestamp"); + convConfig.updateROSBaseTimeOnRosMsg = ph->getParam("i_update_ros_base_time_on_ros_msg"); + convConfig.lowBandwidth = ph->getParam("i_low_bandwidth"); + convConfig.encoding = dai::RawImgFrame::Type::RAW8; + convConfig.addExposureOffset = ph->getParam("i_add_exposure_offset"); + convConfig.expOffset = static_cast(ph->getParam("i_exposure_offset")); + convConfig.reverseSocketOrder = ph->getParam("i_reverse_stereo_socket_order"); + + utils::ImgPublisherConfig pubConfig; + pubConfig.daiNodeName = getName(); + pubConfig.topicName = "~/" + getName(); + pubConfig.lazyPub = ph->getParam("i_enable_lazy_publisher"); + pubConfig.socket = static_cast(ph->getParam("i_board_socket_id")); + pubConfig.calibrationFile = ph->getParam("i_calibration_file"); + pubConfig.rectified = false; + pubConfig.width = ph->getParam("i_width"); + pubConfig.height = ph->getParam("i_height"); + pubConfig.maxQSize = ph->getParam("i_max_q_size"); + + tofPub->setup(device, convConfig, pubConfig); + } +} +void ToF::closeQueues() { + if(ph->getParam("i_publish_topic")) { + tofPub->closeQueue(); + } +} + +dai::Node::Input ToF::getInput(int /*linkType*/) { + return alignNode->inputAlignTo; +} + +void ToF::link(dai::Node::Input in, int /*linkType*/) { + tofNode->depth.link(in); +} + +std::vector> ToF::getPublishers() { + std::vector> pubs; + if(ph->getParam("i_publish_topic") && ph->getParam("i_synced")) { + pubs.push_back(tofPub); + } + return pubs; +} + +void ToF::updateParams(const std::vector& params) { + ph->setRuntimeParams(params); +} + +} // namespace dai_nodes +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/src/dai_nodes/sys_logger.cpp b/depthai_ros_driver/src/dai_nodes/sys_logger.cpp index d6362822..cf4dd7b3 100644 --- a/depthai_ros_driver/src/dai_nodes/sys_logger.cpp +++ b/depthai_ros_driver/src/dai_nodes/sys_logger.cpp @@ -10,7 +10,8 @@ namespace depthai_ros_driver { namespace dai_nodes { -SysLogger::SysLogger(const std::string& daiNodeName, rclcpp::Node* node, std::shared_ptr pipeline) : BaseNode(daiNodeName, node, pipeline) { +SysLogger::SysLogger(const std::string& daiNodeName, std::shared_ptr node, std::shared_ptr pipeline) + : BaseNode(daiNodeName, node, pipeline) { RCLCPP_DEBUG(node->get_logger(), "Creating node %s", daiNodeName.c_str()); setNames(); sysNode = pipeline->create(); diff --git a/depthai_ros_driver/src/param_handlers/camera_param_handler.cpp b/depthai_ros_driver/src/param_handlers/camera_param_handler.cpp index 097b57f9..d043340e 100644 --- a/depthai_ros_driver/src/param_handlers/camera_param_handler.cpp +++ b/depthai_ros_driver/src/param_handlers/camera_param_handler.cpp @@ -7,7 +7,7 @@ namespace depthai_ros_driver { namespace param_handlers { 
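// CameraParamHandler declares the driver-level options (pipeline type, NN type, USB speed, IR brightness) that the camera node consumes at startup.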
-CameraParamHandler::CameraParamHandler(rclcpp::Node* node, const std::string& name) : BaseParamHandler(node, name) { +CameraParamHandler::CameraParamHandler(std::shared_ptr node, const std::string& name) : BaseParamHandler(node, name) { usbSpeedMap = { {"LOW", dai::UsbSpeed::LOW}, {"FULL", dai::UsbSpeed::FULL}, @@ -25,6 +25,8 @@ void CameraParamHandler::declareParams() { declareAndLogParam("i_pipeline_type", "RGBD"); declareAndLogParam("i_nn_type", "spatial"); declareAndLogParam("i_enable_imu", true); + declareAndLogParam("i_enable_diagnostics", true); + declareAndLogParam("i_enable_sync", true); declareAndLogParam("i_enable_ir", true); declareAndLogParam("i_usb_speed", "SUPER_PLUS"); declareAndLogParam("i_mx_id", ""); @@ -36,6 +38,7 @@ void CameraParamHandler::declareParams() { declareAndLogParam("i_laser_dot_brightness", 800, getRangedIntDescriptor(0, 1200)); declareAndLogParam("i_floodlight_brightness", 0, getRangedIntDescriptor(0, 1500)); declareAndLogParam("i_restart_on_diagnostics_error", false); + declareAndLogParam("i_rs_compat", false); declareAndLogParam("i_publish_tf_from_calibration", false); declareAndLogParam("i_tf_camera_name", getROSNode()->get_name()); @@ -57,4 +60,4 @@ dai::CameraControl CameraParamHandler::setRuntimeParams(const std::vector node, const std::string& name) : BaseParamHandler(node, name) {} FeatureTrackerParamHandler::~FeatureTrackerParamHandler() = default; void FeatureTrackerParamHandler::declareParams(std::shared_ptr featureTracker) { declareAndLogParam("i_get_base_device_timestamp", false); @@ -27,4 +27,4 @@ dai::CameraControl FeatureTrackerParamHandler::setRuntimeParams(const std::vecto return ctrl; } } // namespace param_handlers -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/src/param_handlers/imu_param_handler.cpp b/depthai_ros_driver/src/param_handlers/imu_param_handler.cpp index f0291557..68da77b7 100644 --- a/depthai_ros_driver/src/param_handlers/imu_param_handler.cpp +++ b/depthai_ros_driver/src/param_handlers/imu_param_handler.cpp @@ -8,9 +8,7 @@ namespace depthai_ros_driver { namespace param_handlers { -ImuParamHandler::ImuParamHandler(rclcpp::Node* node, const std::string& name) : BaseParamHandler(node, name) {} -ImuParamHandler::~ImuParamHandler() = default; -void ImuParamHandler::declareParams(std::shared_ptr imu, const std::string& imuType) { +ImuParamHandler::ImuParamHandler(std::shared_ptr node, const std::string& name) : BaseParamHandler(node, name) { imuSyncMethodMap = { {"COPY", dai::ros::ImuSyncMethod::COPY}, {"LINEAR_INTERPOLATE_GYRO", dai::ros::ImuSyncMethod::LINEAR_INTERPOLATE_GYRO}, @@ -23,7 +21,11 @@ void ImuParamHandler::declareParams(std::shared_ptr imu, const s {"GEOMAGNETIC_ROTATION_VECTOR", dai::IMUSensor::GEOMAGNETIC_ROTATION_VECTOR}, {"ARVR_STABILIZED_ROTATION_VECTOR", dai::IMUSensor::ARVR_STABILIZED_ROTATION_VECTOR}, {"ARVR_STABILIZED_GAME_ROTATION_VECTOR", dai::IMUSensor::ARVR_STABILIZED_GAME_ROTATION_VECTOR}}; +} +ImuParamHandler::~ImuParamHandler() = default; +void ImuParamHandler::declareParams(std::shared_ptr imu, const std::string& imuType) { declareAndLogParam("i_get_base_device_timestamp", false); + declareAndLogParam("i_max_q_size", 8); auto messageType = declareAndLogParam("i_message_type", "IMU"); declareAndLogParam("i_sync_method", "LINEAR_INTERPOLATE_ACCEL"); declareAndLogParam("i_acc_cov", 0.0); @@ -65,4 +67,4 @@ dai::CameraControl ImuParamHandler::setRuntimeParams(const std::vector node, const std::string& name, const 
dai::CameraBoardSocket& socket) + : BaseParamHandler(node, name) { nnFamilyMap = { {"segmentation", nn::NNFamily::Segmentation}, {"mobilenet", nn::NNFamily::Mobilenet}, @@ -148,4 +149,4 @@ dai::CameraControl NNParamHandler::setRuntimeParams(const std::vector node, const std::string& name) : BaseParamHandler(node, name) {} +PipelineGenParamHandler::~PipelineGenParamHandler() = default; + +void PipelineGenParamHandler::declareParams() { + declareAndLogParam("i_enable_imu", true); + declareAndLogParam("i_enable_diagnostics", true); + declareAndLogParam("i_enable_sync", false); +} +dai::CameraControl PipelineGenParamHandler::setRuntimeParams(const std::vector& /*params*/) { + dai::CameraControl ctrl; + return ctrl; +} +} // namespace param_handlers +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/src/param_handlers/sensor_param_handler.cpp b/depthai_ros_driver/src/param_handlers/sensor_param_handler.cpp index 5b168899..23cc3866 100644 --- a/depthai_ros_driver/src/param_handlers/sensor_param_handler.cpp +++ b/depthai_ros_driver/src/param_handlers/sensor_param_handler.cpp @@ -11,7 +11,8 @@ namespace depthai_ros_driver { namespace param_handlers { -SensorParamHandler::SensorParamHandler(rclcpp::Node* node, const std::string& name, dai::CameraBoardSocket socket) : BaseParamHandler(node, name) { +SensorParamHandler::SensorParamHandler(std::shared_ptr node, const std::string& name, dai::CameraBoardSocket socket) + : BaseParamHandler(node, name) { declareCommonParams(socket); }; SensorParamHandler::~SensorParamHandler() = default; @@ -19,7 +20,11 @@ SensorParamHandler::~SensorParamHandler() = default; void SensorParamHandler::declareCommonParams(dai::CameraBoardSocket socket) { declareAndLogParam("i_max_q_size", 30); declareAndLogParam("i_low_bandwidth", false); + declareAndLogParam("i_low_bandwidth_profile", 4); + declareAndLogParam("i_low_bandwidth_frame_freq", 30); + declareAndLogParam("i_low_bandwidth_bitrate", 0); declareAndLogParam("i_low_bandwidth_quality", 50); + declareAndLogParam("i_low_bandwidth_ffmpeg_encoder", "libx264"); declareAndLogParam("i_calibration_file", ""); declareAndLogParam("i_simulate_from_topic", false); declareAndLogParam("i_simulated_topic_name", ""); @@ -33,6 +38,8 @@ void SensorParamHandler::declareCommonParams(dai::CameraBoardSocket socket) { declareAndLogParam("i_add_exposure_offset", false); declareAndLogParam("i_exposure_offset", 0); declareAndLogParam("i_reverse_stereo_socket_order", false); + declareAndLogParam("i_synced", false); + declareAndLogParam("i_publish_compressed", false); } void SensorParamHandler::declareParams(std::shared_ptr monoCam, dai_nodes::sensor_helpers::ImageSensor sensor, bool publish) { @@ -214,4 +221,4 @@ dai::CameraControl SensorParamHandler::setRuntimeParams(const std::vector node, const std::string& name) : BaseParamHandler(node, name) { depthPresetMap = { {"HIGH_ACCURACY", dai::node::StereoDepth::PresetMode::HIGH_ACCURACY}, {"HIGH_DENSITY", dai::node::StereoDepth::PresetMode::HIGH_DENSITY}, @@ -56,6 +56,10 @@ void StereoParamHandler::declareParams(std::shared_ptr s declareAndLogParam("i_max_q_size", 30); declareAndLogParam("i_low_bandwidth", false); declareAndLogParam("i_low_bandwidth_quality", 50); + declareAndLogParam("i_low_bandwidth_profile", 4); + declareAndLogParam("i_low_bandwidth_frame_freq", 30); + declareAndLogParam("i_low_bandwidth_bitrate", 0); + declareAndLogParam("i_low_bandwidth_ffmpeg_encoder", "libx264"); declareAndLogParam("i_output_disparity", false); declareAndLogParam("i_get_base_device_timestamp", 
false); declareAndLogParam("i_update_ros_base_time_on_ros_msg", false); @@ -64,30 +68,46 @@ void StereoParamHandler::declareParams(std::shared_ptr s declareAndLogParam("i_exposure_offset", 0); declareAndLogParam("i_enable_lazy_publisher", true); declareAndLogParam("i_reverse_stereo_socket_order", false); + declareAndLogParam("i_publish_compressed", false); + declareAndLogParam("i_calibration_file", ""); declareAndLogParam("i_publish_synced_rect_pair", false); - declareAndLogParam("i_publish_left_rect", false); + declareAndLogParam("i_left_rect_publish_topic", false); declareAndLogParam("i_left_rect_low_bandwidth", false); + declareAndLogParam("i_left_rect_low_bandwidth_profile", 4); + declareAndLogParam("i_left_rect_low_bandwidth_frame_freq", 30); + declareAndLogParam("i_left_rect_low_bandwidth_bitrate", 0); declareAndLogParam("i_left_rect_low_bandwidth_quality", 50); + declareAndLogParam("i_left_rect_low_bandwidth_ffmpeg_encoder", "libx264"); declareAndLogParam("i_left_rect_add_exposure_offset", false); declareAndLogParam("i_left_rect_exposure_offset", 0); declareAndLogParam("i_left_rect_enable_feature_tracker", false); + declareAndLogParam("i_left_rect_synced", true); + declareAndLogParam("i_left_rect_publish_compressed", false); - declareAndLogParam("i_publish_right_rect", false); + declareAndLogParam("i_right_rect_publish_topic", false); declareAndLogParam("i_right_rect_low_bandwidth", false); declareAndLogParam("i_right_rect_low_bandwidth_quality", 50); + declareAndLogParam("i_right_rect_low_bandwidth_profile", 4); + declareAndLogParam("i_right_rect_low_bandwidth_frame_freq", 30); + declareAndLogParam("i_right_rect_low_bandwidth_bitrate", 0); + declareAndLogParam("i_right_rect_low_bandwidth_ffmpeg_encoder", "libx264"); declareAndLogParam("i_right_rect_enable_feature_tracker", false); declareAndLogParam("i_right_rect_add_exposure_offset", false); declareAndLogParam("i_right_rect_exposure_offset", 0); + declareAndLogParam("i_right_rect_synced", true); + declareAndLogParam("i_right_rect_publish_compressed", false); + declareAndLogParam("i_enable_spatial_nn", false); declareAndLogParam("i_spatial_nn_source", "right"); + declareAndLogParam("i_synced", false); stereo->setLeftRightCheck(declareAndLogParam("i_lr_check", true)); int width = 1280; int height = 720; std::string socketName; if(declareAndLogParam("i_align_depth", true)) { - socketName = utils::getSocketName(alignSocket); + socketName = getSocketName(alignSocket); try { width = getROSNode()->get_parameter(socketName + ".i_width").as_int(); height = getROSNode()->get_parameter(socketName + ".i_height").as_int(); @@ -116,7 +136,7 @@ void StereoParamHandler::declareParams(std::shared_ptr s stereo->initialConfig.setLeftRightCheckThreshold(declareAndLogParam("i_lrc_threshold", 10)); stereo->initialConfig.setMedianFilter(static_cast(declareAndLogParam("i_depth_filter_size", 5))); stereo->initialConfig.setConfidenceThreshold(declareAndLogParam("i_stereo_conf_threshold", 240)); - if(declareAndLogParam("i_subpixel", false)) { + if(declareAndLogParam("i_subpixel", true)) { stereo->initialConfig.setSubpixel(true); stereo->initialConfig.setSubpixelFractionalBits(declareAndLogParam("i_subpixel_fractional_bits", 3)); } diff --git a/depthai_ros_driver/src/param_handlers/sync_param_handler.cpp b/depthai_ros_driver/src/param_handlers/sync_param_handler.cpp new file mode 100644 index 00000000..ce1259d9 --- /dev/null +++ b/depthai_ros_driver/src/param_handlers/sync_param_handler.cpp @@ -0,0 +1,20 @@ +#include 
"depthai_ros_driver/param_handlers/sync_param_handler.hpp" + +#include "depthai/pipeline/node/Sync.hpp" +#include "rclcpp/node.hpp" + +namespace depthai_ros_driver { +namespace param_handlers { +SyncParamHandler::SyncParamHandler(std::shared_ptr node, const std::string& name) : BaseParamHandler(node, name) {} +SyncParamHandler::~SyncParamHandler() = default; +void SyncParamHandler::declareParams(std::shared_ptr sync) { + sync->setSyncThreshold(std::chrono::milliseconds(declareAndLogParam("sync_threshold", 10))); + sync->setSyncAttempts(declareAndLogParam("sync_attempts", 10)); +} + +dai::CameraControl SyncParamHandler::setRuntimeParams(const std::vector& /*params*/) { + dai::CameraControl ctrl; + return ctrl; +} +} // namespace param_handlers +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/src/param_handlers/tof_param_handler.cpp b/depthai_ros_driver/src/param_handlers/tof_param_handler.cpp new file mode 100644 index 00000000..1e43852d --- /dev/null +++ b/depthai_ros_driver/src/param_handlers/tof_param_handler.cpp @@ -0,0 +1,73 @@ +#include "depthai_ros_driver/param_handlers/tof_param_handler.hpp" + +#include "depthai-shared/common/CameraBoardSocket.hpp" +#include "depthai/pipeline/datatype/ToFConfig.hpp" +#include "depthai/pipeline/node/Camera.hpp" +#include "depthai/pipeline/node/ToF.hpp" +#include "depthai_ros_driver/utils.hpp" +#include "rclcpp/logger.hpp" +#include "rclcpp/node.hpp" + +namespace depthai_ros_driver { +namespace param_handlers { +ToFParamHandler::ToFParamHandler(std::shared_ptr node, const std::string& name) : BaseParamHandler(node, name) { + medianFilterMap = {{"MEDIAN_OFF", dai::MedianFilter::MEDIAN_OFF}, + {"KERNEL_3x3", dai::MedianFilter::KERNEL_3x3}, + {"KERNEL_5x5", dai::MedianFilter::KERNEL_5x5}, + {"KERNEL_7x7", dai::MedianFilter::KERNEL_7x7}}; +} +ToFParamHandler::~ToFParamHandler() = default; +void ToFParamHandler::declareParams(std::shared_ptr cam, std::shared_ptr tof) { + declareAndLogParam("i_publish_topic", true); + declareAndLogParam("i_synced", true); + declareAndLogParam("i_low_bandwidth", false); + declareAndLogParam("i_low_bandwidth_profile", 4); + declareAndLogParam("i_low_bandwidth_bitrate", 0); + declareAndLogParam("i_low_bandwidth_frame_freq", 30); + declareAndLogParam("i_low_bandwidth_quality", 80); + declareAndLogParam("i_get_base_device_timestamp", false); + declareAndLogParam("i_update_ros_base_time_on_ros_msg", false); + declareAndLogParam("i_add_exposure_offset", false); + declareAndLogParam("i_exposure_offset", 0); + declareAndLogParam("i_enable_lazy_publisher", false); + declareAndLogParam("i_reverse_stereo_socket_order", false); + declareAndLogParam("i_calibration_file", ""); + declareAndLogParam("i_max_q_size", 8); + + int socket = declareAndLogParam("i_board_socket_id", static_cast(dai::CameraBoardSocket::CAM_A)); + cam->setBoardSocket(static_cast(socket)); + cam->setSize(declareAndLogParam("i_width", 640), declareAndLogParam("i_height", 480)); + cam->setFps(declareAndLogParam("i_fps", 30)); + auto tofConf = tof->initialConfig.get(); + if(declareAndLogParam("i_enable_optical_correction", false)) { + tofConf.enableOpticalCorrection = true; + } + if(declareAndLogParam("i_enable_fppn_correction", false)) { + tofConf.enableFPPNCorrection = true; + } + if(declareAndLogParam("i_enable_temperature_correction", false)) { + tofConf.enableTemperatureCorrection = true; + } + if(declareAndLogParam("i_enable_wiggle_correction", false)) { + tofConf.enableWiggleCorrection = true; + } + 
if(declareAndLogParam("i_enable_phase_unwrapping", false)) { + tofConf.enablePhaseUnwrapping = true; + } + + tofConf.enablePhaseShuffleTemporalFilter = declareAndLogParam("i_enable_phase_shuffle_temporal_filter", true); + tofConf.phaseUnwrappingLevel = declareAndLogParam("i_phase_unwrapping_level", 4); + tofConf.phaseUnwrapErrorThreshold = declareAndLogParam("i_phase_unwrap_error_threshold", 100); + std::vector medianSettings = { + dai::MedianFilter::MEDIAN_OFF, dai::MedianFilter::KERNEL_3x3, dai::MedianFilter::KERNEL_5x5, dai::MedianFilter::KERNEL_7x7}; + tofConf.median = utils::getValFromMap(declareAndLogParam("i_median_filter", "MEDIAN_OFF"), medianFilterMap); + + tof->initialConfig.set(tofConf); +} + +dai::CameraControl ToFParamHandler::setRuntimeParams(const std::vector& /*params*/) { + dai::CameraControl ctrl; + return ctrl; +} +} // namespace param_handlers +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/src/pipeline/base_types.cpp b/depthai_ros_driver/src/pipeline/base_types.cpp index ccc7f537..ced9ed8d 100644 --- a/depthai_ros_driver/src/pipeline/base_types.cpp +++ b/depthai_ros_driver/src/pipeline/base_types.cpp @@ -9,7 +9,8 @@ #include "depthai_ros_driver/dai_nodes/sensors/imu.hpp" #include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" #include "depthai_ros_driver/dai_nodes/sensors/sensor_wrapper.hpp" -#include "depthai_ros_driver/dai_nodes/stereo.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/stereo.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/tof.hpp" #include "depthai_ros_driver/pipeline/base_pipeline.hpp" #include "depthai_ros_driver/utils.hpp" #include "rclcpp/node.hpp" @@ -17,15 +18,16 @@ namespace depthai_ros_driver { namespace pipeline_gen { -std::vector> RGB::createPipeline(rclcpp::Node* node, +std::vector> RGB::createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& nnType) { + using namespace dai_nodes::sensor_helpers; std::string nTypeUpCase = utils::getUpperCaseStr(nnType); auto nType = utils::getValFromMap(nTypeUpCase, nnTypeMap); std::vector> daiNodes; - auto rgb = std::make_unique("rgb", node, pipeline, device, dai::CameraBoardSocket::CAM_A); + auto rgb = std::make_unique(getNodeName(node, NodeNameEnum::RGB), node, pipeline, device, dai::CameraBoardSocket::CAM_A); switch(nType) { case NNType::None: break; @@ -43,16 +45,17 @@ std::vector> RGB::createPipeline(rclcpp::No daiNodes.push_back(std::move(rgb)); return daiNodes; } -std::vector> RGBD::createPipeline(rclcpp::Node* node, +std::vector> RGBD::createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& nnType) { + using namespace dai_nodes::sensor_helpers; std::string nTypeUpCase = utils::getUpperCaseStr(nnType); auto nType = utils::getValFromMap(nTypeUpCase, nnTypeMap); std::vector> daiNodes; - auto rgb = std::make_unique("rgb", node, pipeline, device, dai::CameraBoardSocket::CAM_A); - auto stereo = std::make_unique("stereo", node, pipeline, device); + auto rgb = std::make_unique(getNodeName(node, NodeNameEnum::RGB), node, pipeline, device, dai::CameraBoardSocket::CAM_A); + auto stereo = std::make_unique(getNodeName(node, NodeNameEnum::Stereo), node, pipeline, device); switch(nType) { case NNType::None: break; @@ -73,17 +76,18 @@ std::vector> RGBD::createPipeline(rclcpp::N daiNodes.push_back(std::move(stereo)); return daiNodes; } -std::vector> RGBStereo::createPipeline(rclcpp::Node* node, +std::vector> RGBStereo::createPipeline(std::shared_ptr node, std::shared_ptr 
device, std::shared_ptr pipeline, const std::string& nnType) { + using namespace dai_nodes::sensor_helpers; std::string nTypeUpCase = utils::getUpperCaseStr(nnType); auto nType = utils::getValFromMap(nTypeUpCase, nnTypeMap); std::vector> daiNodes; - auto rgb = std::make_unique("rgb", node, pipeline, device, dai::CameraBoardSocket::CAM_A); - auto left = std::make_unique("left", node, pipeline, device, dai::CameraBoardSocket::CAM_B); - auto right = std::make_unique("right", node, pipeline, device, dai::CameraBoardSocket::CAM_C); + auto rgb = std::make_unique(getNodeName(node, NodeNameEnum::RGB), node, pipeline, device, dai::CameraBoardSocket::CAM_A); + auto left = std::make_unique(getNodeName(node, NodeNameEnum::Left), node, pipeline, device, dai::CameraBoardSocket::CAM_B); + auto right = std::make_unique(getNodeName(node, NodeNameEnum::Right), node, pipeline, device, dai::CameraBoardSocket::CAM_C); switch(nType) { case NNType::None: break; @@ -103,39 +107,107 @@ std::vector> RGBStereo::createPipeline(rclc daiNodes.push_back(std::move(right)); return daiNodes; } -std::vector> Stereo::createPipeline(rclcpp::Node* node, +std::vector> Stereo::createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& /*nnType*/) { + using namespace dai_nodes::sensor_helpers; std::vector> daiNodes; - auto left = std::make_unique("left", node, pipeline, device, dai::CameraBoardSocket::CAM_B); - auto right = std::make_unique("right", node, pipeline, device, dai::CameraBoardSocket::CAM_C); + auto left = std::make_unique(getNodeName(node, NodeNameEnum::Left), node, pipeline, device, dai::CameraBoardSocket::CAM_B); + auto right = std::make_unique(getNodeName(node, NodeNameEnum::Right), node, pipeline, device, dai::CameraBoardSocket::CAM_C); daiNodes.push_back(std::move(left)); daiNodes.push_back(std::move(right)); return daiNodes; } -std::vector> Depth::createPipeline(rclcpp::Node* node, +std::vector> Depth::createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& /*nnType*/) { + using namespace dai_nodes::sensor_helpers; std::vector> daiNodes; - auto stereo = std::make_unique("stereo", node, pipeline, device); + auto stereo = std::make_unique(getNodeName(node, NodeNameEnum::Stereo), node, pipeline, device); daiNodes.push_back(std::move(stereo)); return daiNodes; } -std::vector> CamArray::createPipeline(rclcpp::Node* node, +std::vector> CamArray::createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& /*nnType*/) { + using namespace dai_nodes::sensor_helpers; std::vector> daiNodes; for(auto& feature : device->getConnectedCameraFeatures()) { - auto name = utils::getSocketName(feature.socket); + auto name = getSocketName(node, feature.socket); auto daiNode = std::make_unique(name, node, pipeline, device, feature.socket); daiNodes.push_back(std::move(daiNode)); }; return daiNodes; } + +std::vector> DepthToF::createPipeline(std::shared_ptr node, + std::shared_ptr device, + std::shared_ptr pipeline, + const std::string& /*nnType*/) { + std::vector> daiNodes; + auto tof = std::make_unique("tof", node, pipeline); + auto stereo = std::make_unique("stereo", node, pipeline, device); + daiNodes.push_back(std::move(tof)); + daiNodes.push_back(std::move(stereo)); + return daiNodes; +} +std::vector> StereoToF::createPipeline(std::shared_ptr node, + std::shared_ptr device, + std::shared_ptr pipeline, + const std::string& /*nnType*/) { + std::vector> daiNodes; + auto tof = 
std::make_unique("tof", node, pipeline, dai::CameraBoardSocket::CAM_C); + auto left = std::make_unique("left", node, pipeline, device, dai::CameraBoardSocket::CAM_B); + auto right = std::make_unique("right", node, pipeline, device, dai::CameraBoardSocket::CAM_C); + right->link(tof->getInput()); + daiNodes.push_back(std::move(left)); + daiNodes.push_back(std::move(right)); + daiNodes.push_back(std::move(tof)); + return daiNodes; +} + +std::vector> ToF::createPipeline(std::shared_ptr node, + std::shared_ptr /*device*/, + std::shared_ptr pipeline, + const std::string& /*nnType*/) { + std::vector> daiNodes; + auto tof = std::make_unique("tof", node, pipeline); + daiNodes.push_back(std::move(tof)); + return daiNodes; +} +std::vector> RGBToF::createPipeline(std::shared_ptr node, + std::shared_ptr device, + std::shared_ptr pipeline, + const std::string& nnType) { + std::string nTypeUpCase = utils::getUpperCaseStr(nnType); + auto nType = utils::getValFromMap(nTypeUpCase, nnTypeMap); + + std::vector> daiNodes; + auto rgb = std::make_unique("right", node, pipeline, device, dai::CameraBoardSocket::CAM_C); + auto tof = std::make_unique("tof", node, pipeline, dai::CameraBoardSocket::CAM_C); + rgb->link(tof->getInput()); + switch(nType) { + case NNType::None: + break; + case NNType::RGB: { + auto nn = createNN(node, pipeline, *rgb); + daiNodes.push_back(std::move(nn)); + break; + } + case NNType::Spatial: { + RCLCPP_WARN(node->get_logger(), "Spatial NN selected, but configuration is RGBToF. Please change camera.i_nn_type parameter to RGB."); + } + default: + break; + } + daiNodes.push_back(std::move(rgb)); + daiNodes.push_back(std::move(tof)); + return daiNodes; +} } // namespace pipeline_gen } // namespace depthai_ros_driver @@ -146,4 +218,8 @@ PLUGINLIB_EXPORT_CLASS(depthai_ros_driver::pipeline_gen::RGBD, depthai_ros_drive PLUGINLIB_EXPORT_CLASS(depthai_ros_driver::pipeline_gen::RGBStereo, depthai_ros_driver::pipeline_gen::BasePipeline) PLUGINLIB_EXPORT_CLASS(depthai_ros_driver::pipeline_gen::Stereo, depthai_ros_driver::pipeline_gen::BasePipeline) PLUGINLIB_EXPORT_CLASS(depthai_ros_driver::pipeline_gen::Depth, depthai_ros_driver::pipeline_gen::BasePipeline) -PLUGINLIB_EXPORT_CLASS(depthai_ros_driver::pipeline_gen::CamArray, depthai_ros_driver::pipeline_gen::BasePipeline) \ No newline at end of file +PLUGINLIB_EXPORT_CLASS(depthai_ros_driver::pipeline_gen::CamArray, depthai_ros_driver::pipeline_gen::BasePipeline) +PLUGINLIB_EXPORT_CLASS(depthai_ros_driver::pipeline_gen::StereoToF, depthai_ros_driver::pipeline_gen::BasePipeline) +PLUGINLIB_EXPORT_CLASS(depthai_ros_driver::pipeline_gen::DepthToF, depthai_ros_driver::pipeline_gen::BasePipeline) +PLUGINLIB_EXPORT_CLASS(depthai_ros_driver::pipeline_gen::ToF, depthai_ros_driver::pipeline_gen::BasePipeline) +PLUGINLIB_EXPORT_CLASS(depthai_ros_driver::pipeline_gen::RGBToF, depthai_ros_driver::pipeline_gen::BasePipeline) diff --git a/depthai_ros_driver/src/pipeline/pipeline_generator.cpp b/depthai_ros_driver/src/pipeline/pipeline_generator.cpp index b2bfe69d..330a91c1 100644 --- a/depthai_ros_driver/src/pipeline/pipeline_generator.cpp +++ b/depthai_ros_driver/src/pipeline/pipeline_generator.cpp @@ -2,8 +2,11 @@ #include "depthai/device/Device.hpp" #include "depthai/pipeline/Pipeline.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/img_pub.hpp" #include "depthai_ros_driver/dai_nodes/sensors/imu.hpp" +#include "depthai_ros_driver/dai_nodes/sensors/sync.hpp" #include "depthai_ros_driver/dai_nodes/sys_logger.hpp" +#include 
"depthai_ros_driver/param_handlers/pipeline_gen_param_handler.hpp" #include "depthai_ros_driver/pipeline/base_pipeline.hpp" #include "depthai_ros_driver/utils.hpp" #include "pluginlib/class_loader.hpp" @@ -11,19 +14,44 @@ namespace depthai_ros_driver { namespace pipeline_gen { -std::vector> PipelineGenerator::createPipeline(rclcpp::Node* node, +PipelineGenerator::PipelineGenerator() { + pluginTypeMap = {{"RGB", "depthai_ros_driver::pipeline_gen::RGB"}, + {"RGBD", "depthai_ros_driver::pipeline_gen::RGBD"}, + {"RGBSTEREO", "depthai_ros_driver::pipeline_gen::RGBStereo"}, + {"STEREO", "depthai_ros_driver::pipeline_gen::Stereo"}, + {"DEPTH", "depthai_ros_driver::pipeline_gen::Depth"}, + {"CAMARRAY", "depthai_ros_driver::pipeline_gen::CamArray"}, + {"DEPTHTOF", "depthai_ros_driver::pipeline_gen::DepthToF"}, + {"STEREOTOF", "depthai_ros_driver::pipeline_gen::StereoToF"}, + {"TOF", "depthai_ros_driver::pipeline_gen::ToF"}, + {"RGBTOF", "depthai_ros_driver::pipeline_gen::RGBToF"}}; + pipelineTypeMap = {{"RGB", PipelineType::RGB}, + {"RGBD", PipelineType::RGBD}, + {"RGBSTEREO", PipelineType::RGBStereo}, + {"STEREO", PipelineType::Stereo}, + {"DEPTH", PipelineType::Depth}, + {"CAMARRAY", PipelineType::CamArray}, + {"DEPTHTOF", PipelineType::DepthToF}, + {"STEREOTOF", PipelineType::StereoToF}, + {"TOF", PipelineType::ToF}, + {"RGBTOF", PipelineType::RGBToF}}; +} + +PipelineGenerator::~PipelineGenerator() = default; +std::vector> PipelineGenerator::createPipeline(std::shared_ptr node, std::shared_ptr device, std::shared_ptr pipeline, const std::string& pipelineType, - const std::string& nnType, - bool enableImu) { + const std::string& nnType) { + ph = std::make_unique(node, "pipeline_gen"); + ph->declareParams(); RCLCPP_INFO(node->get_logger(), "Pipeline type: %s", pipelineType.c_str()); std::string pluginType = pipelineType; std::vector> daiNodes; // Check if one of the default types. try { std::string pTypeUpCase = utils::getUpperCaseStr(pipelineType); - auto pTypeValidated = validatePipeline(node, pTypeUpCase, device->getCameraSensorNames().size()); + auto pTypeValidated = validatePipeline(node, pTypeUpCase, device->getCameraSensorNames().size(), device->getDeviceName()); pluginType = utils::getValFromMap(pTypeValidated, pluginTypeMap); } catch(std::out_of_range& e) { RCLCPP_DEBUG(node->get_logger(), "Pipeline type [%s] not found in base types, trying to load as a plugin.", pipelineType.c_str()); @@ -35,24 +63,41 @@ std::vector> PipelineGenerator::createPipel daiNodes = pipelinePlugin->createPipeline(node, device, pipeline, nnType); } catch(pluginlib::PluginlibException& ex) { RCLCPP_ERROR(node->get_logger(), "The plugin failed to load for some reason. 
Error: %s\n", ex.what()); + throw std::runtime_error("Plugin loading failed."); } - if(enableImu) { - if(device->getConnectedIMU() == "NONE" || device->getConnectedIMU().empty()){ + if(ph->getParam("i_enable_imu")) { + if(device->getConnectedIMU() == "NONE" || device->getConnectedIMU().empty()) { RCLCPP_WARN(node->get_logger(), "IMU enabled but not available!"); } else { auto imu = std::make_unique("imu", node, pipeline, device); daiNodes.push_back(std::move(imu)); } } - auto sysLogger = std::make_unique("sys_logger", node, pipeline); - daiNodes.push_back(std::move(sysLogger)); + if(ph->getParam("i_enable_diagnostics")) { + auto sysLogger = std::make_unique("sys_logger", node, pipeline); + daiNodes.push_back(std::move(sysLogger)); + } + if(ph->getParam("i_enable_sync")) { + auto sync = std::make_unique("sync", node, pipeline); + for(auto& daiNode : daiNodes) { + auto pubs = daiNode->getPublishers(); + RCLCPP_DEBUG(node->get_logger(), "Number of synced publishers found for %s: %zu", daiNode->getName().c_str(), pubs.size()); + if(!pubs.empty()) { + sync->addPublishers(pubs); + } + } + daiNodes.push_back(std::move(sync)); + } RCLCPP_INFO(node->get_logger(), "Finished setting up pipeline."); return daiNodes; } -std::string PipelineGenerator::validatePipeline(rclcpp::Node* node, const std::string& typeStr, int sensorNum) { +std::string PipelineGenerator::validatePipeline(std::shared_ptr node, const std::string& typeStr, int sensorNum, const std::string& deviceName) { auto pType = utils::getValFromMap(typeStr, pipelineTypeMap); + if(deviceName == "OAK-D-SR-POE") { + RCLCPP_WARN(node->get_logger(), "OAK-D-SR-POE device detected. Pipeline types other than StereoToF/ToF/RGBToF might not work without reconfiguration."); + } if(sensorNum == 1) { if(pType != PipelineType::RGB) { RCLCPP_ERROR(node->get_logger(), "Invalid pipeline chosen for camera as it has only one sensor. 
Switching to RGB."); @@ -67,4 +112,4 @@ std::string PipelineGenerator::validatePipeline(rclcpp::Node* node, const std::s return typeStr; } } // namespace pipeline_gen -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_driver/src/utils.cpp b/depthai_ros_driver/src/utils.cpp index d4813e55..10a8a6b0 100644 --- a/depthai_ros_driver/src/utils.cpp +++ b/depthai_ros_driver/src/utils.cpp @@ -1,8 +1,7 @@ #include "depthai_ros_driver/utils.hpp" -#include "depthai-shared/common/CameraBoardSocket.hpp" -#include "depthai-shared/common/CameraFeatures.hpp" -#include "depthai_ros_driver/dai_nodes/sensors/sensor_helpers.hpp" +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/node/XLinkOut.hpp" namespace depthai_ros_driver { namespace utils { std::string getUpperCaseStr(const std::string& string) { @@ -10,8 +9,13 @@ std::string getUpperCaseStr(const std::string& string) { for(auto& c : upper) c = toupper(c); return upper; } -std::string getSocketName(dai::CameraBoardSocket socket) { - return dai_nodes::sensor_helpers::socketNameMap.at(socket); -} +std::shared_ptr setupXout(std::shared_ptr pipeline, const std::string& name) { + auto xout = pipeline->create(); + xout->setStreamName(name); + xout->input.setBlocking(false); + xout->input.setWaitForMessage(false); + xout->input.setQueueSize(1); + return xout; +}; } // namespace utils -} // namespace depthai_ros_driver \ No newline at end of file +} // namespace depthai_ros_driver diff --git a/depthai_ros_msgs/CMakeLists.txt b/depthai_ros_msgs/CMakeLists.txt index 01d3af61..4011f812 100644 --- a/depthai_ros_msgs/CMakeLists.txt +++ b/depthai_ros_msgs/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 3.10.2) # CMake version in Ubuntu 18.04 LTS -project(depthai_ros_msgs VERSION 2.9.0) +project(depthai_ros_msgs VERSION 2.10.0) if(POLICY CMP0057) cmake_policy(SET CMP0057 NEW) @@ -31,6 +31,8 @@ rosidl_generate_interfaces(${PROJECT_NAME} # "msg/ImageMarkerArray.msg" "msg/SpatialDetection.msg" "msg/SpatialDetectionArray.msg" + "msg/TrackDetection2D.msg" + "msg/TrackDetection2DArray.msg" "srv/TriggerNamed.srv" "srv/NormalizedImageCrop.srv" DEPENDENCIES builtin_interfaces geometry_msgs std_msgs vision_msgs sensor_msgs diff --git a/depthai_ros_msgs/msg/TrackDetection2D.msg b/depthai_ros_msgs/msg/TrackDetection2D.msg new file mode 100644 index 00000000..c300c602 --- /dev/null +++ b/depthai_ros_msgs/msg/TrackDetection2D.msg @@ -0,0 +1,25 @@ + +# Class probabilities +vision_msgs/ObjectHypothesisWithPose[] results + +# 2D bounding box surrounding the object. +vision_msgs/BoundingBox2D bbox + +# If true, this message contains object tracking information. +bool is_tracking + +# ID used for consistency across multiple detection messages. This value will +# likely differ from the id field set in each individual ObjectHypothesis. +# If you set this field, be sure to also set is_tracking to True. +string tracking_id + +# Age: number of frames the object is being tracked +int32 tracking_age + +# Status of the tracking: +# 0 = NEW -> the object is newly added. +# 1 = TRACKED -> the object is being tracked. +# 2 = LOST -> the object gets lost now. The object can be tracked again automatically (long term tracking) +# or by specifying detected object manually (short term and zero term tracking). +# 3 = REMOVED -> the object is removed. 
+int32 tracking_status diff --git a/depthai_ros_msgs/msg/TrackDetection2DArray.msg b/depthai_ros_msgs/msg/TrackDetection2DArray.msg new file mode 100644 index 00000000..92bffbde --- /dev/null +++ b/depthai_ros_msgs/msg/TrackDetection2DArray.msg @@ -0,0 +1,7 @@ +# A list of 2D tracklets for a multi-object 2D tracker. + +std_msgs/Header header +# A list of the tracking proposals. +TrackDetection2D[] detections + +## These messages are kept as close as possible to the official vision_msgs (http://wiki.ros.org/vision_msgs). \ No newline at end of file diff --git a/depthai_ros_msgs/package.xml b/depthai_ros_msgs/package.xml index 7751f6b6..5bebaec3 100644 --- a/depthai_ros_msgs/package.xml +++ b/depthai_ros_msgs/package.xml @@ -1,10 +1,10 @@ depthai_ros_msgs - 2.9.0 + 2.10.0 Package to keep interface independent of the driver - Sachin Guruswamy + Adam Serafin Sachin Guruswamy MIT diff --git a/docs/multicam.gif b/docs/multicam.gif deleted file mode 100644 index 62969a37..00000000 Binary files a/docs/multicam.gif and /dev/null differ diff --git a/docs/param_rgb.gif b/docs/param_rgb.gif deleted file mode 100644 index bc95ef78..00000000 Binary files a/docs/param_rgb.gif and /dev/null differ diff --git a/docs/param_stereo.gif b/docs/param_stereo.gif deleted file mode 100644 index 881e502a..00000000 Binary files a/docs/param_stereo.gif and /dev/null differ diff --git a/docs/rtabmap.gif b/docs/rtabmap.gif deleted file mode 100644 index 7cab07ef..00000000 Binary files a/docs/rtabmap.gif and /dev/null differ diff --git a/docs/start_stop.gif b/docs/start_stop.gif deleted file mode 100644 index 0fec32f0..00000000 Binary files a/docs/start_stop.gif and /dev/null differ
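For reference, a minimal subscriber for the new TrackDetection2DArray interface might look like the sketch below. It is not part of the patch: the node name and the "oak/nn/tracklets" topic are assumptions that depend on how the driver is launched, while the fields read (detections, tracking_id, tracking_age, tracking_status) come from the message definitions above.

#include <memory>

#include "depthai_ros_msgs/msg/track_detection2_d_array.hpp"
#include "rclcpp/rclcpp.hpp"

// Sketch: log every tracklet published by the driver's tracking converter.
class TrackletListener : public rclcpp::Node {
   public:
    TrackletListener() : rclcpp::Node("tracklet_listener") {
        sub_ = create_subscription<depthai_ros_msgs::msg::TrackDetection2DArray>(
            "oak/nn/tracklets",  // assumed topic name, depends on the driver configuration
            10,
            [this](const depthai_ros_msgs::msg::TrackDetection2DArray& msg) {
                for(const auto& det : msg.detections) {
                    // tracking_status follows the NEW/TRACKED/LOST/REMOVED encoding in TrackDetection2D.msg.
                    RCLCPP_INFO(get_logger(), "tracklet %s: age=%d status=%d", det.tracking_id.c_str(), det.tracking_age, det.tracking_status);
                }
            });
    }

   private:
    rclcpp::Subscription<depthai_ros_msgs::msg::TrackDetection2DArray>::SharedPtr sub_;
};

int main(int argc, char** argv) {
    rclcpp::init(argc, argv);
    rclcpp::spin(std::make_shared<TrackletListener>());
    rclcpp::shutdown();
    return 0;
}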