From 6e00c9c65246ee8528492ba8991d2c60a695e407 Mon Sep 17 00:00:00 2001
From: a-maumau
Date: Mon, 16 Dec 2024 16:41:20 +0900
Subject: [PATCH 1/2] add camera_projection class and projection cache

Signed-off-by: a-maumau
---
 .../CMakeLists.txt                            |   1 +
 .../README.md                                 |   6 +
 .../config/fusion_common.param.yaml           |   9 +-
 .../camera_projection.hpp                     |  88 ++++++++
 .../fusion_node.hpp                           |  11 +-
 .../pointpainting_fusion/node.hpp             |   3 -
 .../roi_cluster_fusion/node.hpp               |   1 -
 .../roi_detected_object_fusion/node.hpp       |   7 +-
 .../roi_pointcloud_fusion/node.hpp            |   2 +-
 .../segmentation_pointcloud_fusion/node.hpp   |   2 +-
 .../utils/utils.hpp                           |   4 -
 .../src/camera_projection.cpp                 | 196 ++++++++++++++++++
 .../src/fusion_node.cpp                       |  46 +++-
 .../src/pointpainting_fusion/node.cpp         |  58 +++---
 .../src/roi_cluster_fusion/node.cpp           |  31 ++-
 .../src/roi_detected_object_fusion/node.cpp   |  38 ++--
 .../src/roi_pointcloud_fusion/node.cpp        |  72 ++++---
 .../segmentation_pointcloud_fusion/node.cpp   |  21 +-
 .../src/utils/utils.cpp                       |  14 --
 19 files changed, 447 insertions(+), 163 deletions(-)
 create mode 100644 perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp
 create mode 100644 perception/autoware_image_projection_based_fusion/src/camera_projection.cpp

diff --git a/perception/autoware_image_projection_based_fusion/CMakeLists.txt b/perception/autoware_image_projection_based_fusion/CMakeLists.txt
index 73d305d2ab2a8..7c8160d6fe1b7 100644
--- a/perception/autoware_image_projection_based_fusion/CMakeLists.txt
+++ b/perception/autoware_image_projection_based_fusion/CMakeLists.txt
@@ -16,6 +16,7 @@ endif()

 # Build non-CUDA dependent nodes
 ament_auto_add_library(${PROJECT_NAME} SHARED
+  src/camera_projection.cpp
   src/fusion_node.cpp
   src/debugger.cpp
   src/utils/geometry.cpp
diff --git a/perception/autoware_image_projection_based_fusion/README.md b/perception/autoware_image_projection_based_fusion/README.md
index c976697b0396d..dcf35e45bbd9d 100644
--- a/perception/autoware_image_projection_based_fusion/README.md
+++ b/perception/autoware_image_projection_based_fusion/README.md
@@ -66,6 +66,12 @@ ros2 launch autoware_image_projection_based_fusion pointpainting_fusion.launch.x

 The rclcpp::TimerBase timer could not break a for loop, therefore even if time is out when fusing a roi msg at the middle, the program will run until all msgs are fused.

+### Approximate camera projection
+
+For algorithms such as `pointpainting_fusion`, projecting points onto an unrectified (raw) image can be computationally expensive. To reduce this load, set the [`approximate_camera_projection`](config/fusion_common.param.yaml) parameter to `true` for each camera (ROI). When the corresponding `point_project_to_unrectified_image` parameter is also `true`, the projections are precomputed and cached.
+
+The cached projections are computed on a grid whose cell size is set by the `approximation_grid_width_size` and `approximation_grid_height_size` parameters in the [configuration file](config/fusion_common.param.yaml). The centers of the grid cells are used as the projected points.
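To make the grid rounding described in this README addition concrete, here is a minimal, self-contained sketch of the center-snapping arithmetic. It is a simplified standalone illustration, not code from the patch; the variable names mirror the patch's `half_grid_w_size_` and `inv_grid_w_size_` members.

```cpp
#include <cmath>
#include <cstdio>

int main()
{
  const float grid_w = 1.0f;                 // approximation_grid_width_size
  const float half_w = grid_w / 2.0f;        // grid centers sit at half_w, half_w + grid_w, ...
  const float inv_w = 1.0f / grid_w;

  const float px = 3.7f;  // rectified x coordinate of a projected point
  // snap to the nearest grid center
  const float qx = std::round((px - half_w) * inv_w) * grid_w + half_w;
  // column of the cache cell that stores the precomputed raw-image position
  const int grid_x = static_cast<int>(std::floor(qx / grid_w));
  std::printf("px=%.1f -> center qx=%.1f, grid_x=%d\n", px, qx, grid_x);  // 3.5, 3
  return 0;
}
```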
+
 ### Detail description of each fusion's algorithm is in the following links

 | Fusion Name | Description | Detail |
diff --git a/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml b/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml
index 347cb57b484e9..553bb7e29220e 100644
--- a/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml
+++ b/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml
@@ -4,7 +4,8 @@
       timeout_ms: 70.0
       match_threshold_ms: 50.0
       image_buffer_size: 15
-      point_project_to_unrectified_image: false
+      # per-ROI setting: whether to project points onto the unrectified (raw) image
+      point_project_to_unrectified_image: [false, false, false, false, false, false]
       debug_mode: false
       filter_scope_min_x: -100.0
       filter_scope_min_y: -100.0
@@ -13,5 +14,11 @@
       filter_scope_max_y: 100.0
       filter_scope_max_z: 100.0

+      # camera projection cache setting for each ROI
+      approximate_camera_projection: [true, true, true, true, true, true]
+      # grid size in pixels
+      approximation_grid_width_size: 1.0
+      approximation_grid_height_size: 1.0
+
       # debug parameters
       publish_processing_time_detail: false
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp
new file mode 100644
index 0000000000000..cc66dcd15ce62
--- /dev/null
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp
@@ -0,0 +1,88 @@
+// Copyright 2024 TIER IV, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__CAMERA_PROJECTION_HPP_
+#define AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__CAMERA_PROJECTION_HPP_
+
+#define EIGEN_MPL2_ONLY
+
+#include <Eigen/Core>
+
+#include <opencv2/core/core.hpp>
+
+#include <sensor_msgs/msg/camera_info.hpp>
+
+#include <image_geometry/pinhole_camera_model.h>
+
+#include <functional>
+
+#include <memory>
+#include <vector>
+
+namespace autoware::image_projection_based_fusion
+{
+struct PixelPos
+{
+  float x;
+  float y;
+};
+
+class CameraProjection
+{
+public:
+  explicit CameraProjection(
+    const sensor_msgs::msg::CameraInfo & camera_info,
+    const float grid_width, const float grid_height,
+    const bool unrectify,
+    const bool use_approximation
+  );
+  CameraProjection(): grid_w_size_(1.0), grid_h_size_(1.0), unrectify_(false) {};
+  void initialize();
+  std::function<bool(const cv::Point3d &, Eigen::Vector2d &)> calcImageProjectedPoint;
+  sensor_msgs::msg::CameraInfo getCameraInfo();
+  bool isOutsideHorizontalView(const float px, const float pz);
+  bool isOutsideVerticalView(const float py, const float pz);
+
+protected:
+  bool calcRectifiedImageProjectedPoint(const cv::Point3d & point3d, Eigen::Vector2d & projected_point);
+  bool calcRawImageProjectedPoint(const cv::Point3d & point3d, Eigen::Vector2d & projected_point);
+  bool calcRawImageProjectedPointWithApproximation(const cv::Point3d & point3d, Eigen::Vector2d & projected_point);
+  void initializeCache();
+
+  sensor_msgs::msg::CameraInfo camera_info_;
+  uint32_t image_h_, image_w_;
+  double tan_h_x_, tan_h_y_;
+
+  uint32_t cache_size_;
+  float grid_w_size_;
+  float grid_h_size_;
+  float half_grid_w_size_;
+  float half_grid_h_size_;
+  float inv_grid_w_size_;
+  float inv_grid_h_size_;
+  uint32_t grid_x_num_;
+  uint32_t grid_y_num_;
+  float index_grid_out_h_;
+  float index_grid_out_w_;
+
+  bool unrectify_;
+  bool use_approximation_;
+
+  std::unique_ptr<PixelPos[]> projection_cache_;
+  image_geometry::PinholeCameraModel camera_model_;
+};
+
+}  // namespace autoware::image_projection_based_fusion
+
+#endif  // AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__CAMERA_PROJECTION_HPP_
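The header above is the whole public surface of the new class. A hedged usage sketch, built only from the API declared in that header (the `camera_info` argument is a stand-in for whatever the node receives):

```cpp
#include "autoware/image_projection_based_fusion/camera_projection.hpp"

#include <opencv2/core/core.hpp>

using autoware::image_projection_based_fusion::CameraProjection;

void example(const sensor_msgs::msg::CameraInfo & camera_info)
{
  // 1.0 px grid, project onto the raw image, use the cached approximation
  CameraProjection projector(camera_info, 1.0, 1.0, true, true);
  projector.initialize();  // binds calcImageProjectedPoint and fills the cache

  const cv::Point3d point_camera(1.0, 0.5, 10.0);  // x, y, z in the optical frame
  Eigen::Vector2d projected;
  if (projector.calcImageProjectedPoint(point_camera, projected)) {
    // projected.x(), projected.y() are pixel coordinates on the raw image;
    // a false return means the point falls outside the image
  }
}
```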
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp
index 04ecf89faa0d4..d522ef1287847 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp
@@ -15,6 +15,7 @@
 #ifndef AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__FUSION_NODE_HPP_
 #define AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__FUSION_NODE_HPP_

+#include <autoware/image_projection_based_fusion/camera_projection.hpp>
 #include
 #include
 #include
@@ -56,6 +57,7 @@
 using tier4_perception_msgs::msg::DetectedObjectsWithFeature;
 using tier4_perception_msgs::msg::DetectedObjectWithFeature;
 using PointCloud = pcl::PointCloud<pcl::PointXYZ>;
 using autoware_perception_msgs::msg::ObjectClassification;
+using autoware::image_projection_based_fusion::CameraProjection;

 template <class TargetMsg3D, class ObjType, class Msg2D>
 class FusionNode : public rclcpp::Node
@@ -86,7 +88,7 @@
   virtual void fuseOnSingleImage(
     const TargetMsg3D & input_msg, const std::size_t image_id, const Msg2D & input_roi_msg,
-    const sensor_msgs::msg::CameraInfo & camera_info, TargetMsg3D & output_msg) = 0;
+    TargetMsg3D & output_msg) = 0;

   // set args if you need
   virtual void postprocess(TargetMsg3D & output_msg);
@@ -100,11 +102,16 @@
   tf2_ros::Buffer tf_buffer_;
   tf2_ros::TransformListener tf_listener_;

-  bool point_project_to_unrectified_image_{false};
+  std::vector<bool> point_project_to_unrectified_image_{false};

   // camera_info
   std::map<std::size_t, sensor_msgs::msg::CameraInfo> camera_info_map_;
   std::vector<rclcpp::Subscription<sensor_msgs::msg::CameraInfo>::SharedPtr> camera_info_subs_;
+  // camera projection
+  std::vector<CameraProjection> camera_projectors_;
+  std::vector<bool> approx_camera_projection_;
+  float approx_grid_w_size_;
+  float approx_grid_h_size_;

   rclcpp::TimerBase::SharedPtr timer_;
   double timeout_ms_{};
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/pointpainting_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/pointpainting_fusion/node.hpp
index 8c0e2ed78fc6c..cd6cd87976522 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/pointpainting_fusion/node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/pointpainting_fusion/node.hpp
@@ -53,15 +53,12 @@
   void fuseOnSingleImage(
     const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const std::size_t image_id,
     const DetectedObjectsWithFeature & input_roi_msg,
-    const sensor_msgs::msg::CameraInfo & camera_info,
     sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override;

   void postprocess(sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override;

   rclcpp::Publisher<DetectedObjects>::SharedPtr obj_pub_ptr_;

-  std::vector<double> tan_h_;  // horizontal field of view
-
   int omp_num_threads_{1};
   float score_threshold_{0.0};
   std::vector<std::string> class_names_;
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_cluster_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_cluster_fusion/node.hpp
index be05c0a1c4bc6..903b153af0681 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_cluster_fusion/node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_cluster_fusion/node.hpp
@@ -38,7 +38,6 @@
   void fuseOnSingleImage(
     const DetectedObjectsWithFeature & input_cluster_msg, const std::size_t image_id,
     const DetectedObjectsWithFeature & input_roi_msg,
-    const sensor_msgs::msg::CameraInfo & camera_info,
     DetectedObjectsWithFeature & output_cluster_msg) override;

   std::string trust_object_iou_mode_{"iou"};
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
index 7103537ec852d..f355fd2129b24 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
@@ -43,12 +43,11 @@
   void fuseOnSingleImage(
     const DetectedObjects & input_object_msg, const std::size_t image_id,
     const DetectedObjectsWithFeature & input_roi_msg,
-    const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjects & output_object_msg) override;
+    DetectedObjects & output_object_msg) override;

   std::map<std::size_t, DetectedObjectWithFeature> generateDetectedObjectRoIs(
-    const DetectedObjects & input_object_msg, const double image_width, const double image_height,
-    const Eigen::Affine3d & object2camera_affine,
-    const image_geometry::PinholeCameraModel & pinhole_camera_model);
+    const DetectedObjects & input_object_msg, const std::size_t & image_id,
+    const Eigen::Affine3d & object2camera_affine);

   void fuseObjectsOnImage(
     const DetectedObjects & input_object_msg,
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
index 9baf754e224a7..0c454aebac42c 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
@@ -50,7 +50,7 @@
   void fuseOnSingleImage(
     const PointCloud2 & input_pointcloud_msg, const std::size_t image_id,
     const DetectedObjectsWithFeature & input_roi_msg,
-    const sensor_msgs::msg::CameraInfo & camera_info, PointCloud2 & output_pointcloud_msg) override;
+    PointCloud2 & output_pointcloud_msg) override;

   bool out_of_scope(const DetectedObjectWithFeature & obj) override;
 };
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp
index 0bec3195bb402..7454cb7bcd173 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp
@@ -61,7 +61,7 @@ class SegmentPointCloudFusionNode : public FusionNode<PointCloud2, PointCloud2, Image>
   void fuseOnSingleImage(
     const PointCloud2 & input_pointcloud_msg, const std::size_t image_id,
-    const Image & input_mask, const CameraInfo & camera_info,
-    PointCloud2 & output_cloud) override;
+    const Image & input_mask, PointCloud2 & output_cloud) override;
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/utils/utils.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/utils/utils.hpp
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/utils/utils.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/utils/utils.hpp
-Eigen::Vector2d calcRawImageProjectedPoint(
-  const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d,
-  const bool & unrectify);
-
 std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
   const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
   const std::string & source_frame_id, const rclcpp::Time & time);
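The next file implements the projector. One design point worth noting before the listing: `initialize()` binds `calcImageProjectedPoint` once, via `std::function`, to one of three member functions, so the rectify/approximate decision is made per camera rather than per point in the hot loop. A minimal sketch of that dispatch pattern (simplified stand-in names, not the patch code):

```cpp
#include <functional>
#include <iostream>

// Choose the projection strategy once, then call it millions of times.
struct Projector
{
  std::function<int(int)> project;  // stands in for calcImageProjectedPoint

  void initialize(bool approximate)
  {
    if (approximate) {
      project = [](int v) { return v / 2 * 2; };  // quantized/cached path
    } else {
      project = [](int v) { return v; };  // exact path
    }
  }
};

int main()
{
  Projector p;
  p.initialize(true);
  std::cout << p.project(7) << '\n';  // prints 6: the quantized result
  return 0;
}
```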
diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp
new file mode 100644
index 0000000000000..af17d85989d35
--- /dev/null
+++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp
@@ -0,0 +1,196 @@
+// Copyright 2024 TIER IV, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "autoware/image_projection_based_fusion/camera_projection.hpp"
+
+#include <cmath>
+#include <memory>
+
+#ifdef ROS_DISTRO_GALACTIC
+#include <tf2_eigen/tf2_eigen.h>
+#include <tf2_sensor_msgs/tf2_sensor_msgs.h>
+#else
+#include <tf2_eigen/tf2_eigen.hpp>
+#include <tf2_sensor_msgs/tf2_sensor_msgs.hpp>
+#endif
+
+namespace autoware::image_projection_based_fusion
+{
+CameraProjection::CameraProjection(
+  const sensor_msgs::msg::CameraInfo & camera_info, const float grid_width, const float grid_height,
+  const bool unrectify, const bool use_approximation=false
+): camera_info_(camera_info), grid_w_size_(grid_width), grid_h_size_(grid_height), unrectify_(unrectify), use_approximation_(use_approximation){
+  if (grid_w_size_ <= 0.0 || grid_h_size_ <= 0.0) {
+    throw std::runtime_error("Both grid_width and grid_height must be > 0.0");
+  }
+
+  image_w_ = camera_info.width;
+  image_h_ = camera_info.height;
+
+  // prepare camera model
+  camera_model_.fromCameraInfo(camera_info);
+
+  // cache settings
+  // for shifting to the grid center
+  half_grid_w_size_ = grid_w_size_ / 2.0;
+  half_grid_h_size_ = grid_h_size_ / 2.0;
+  inv_grid_w_size_ = 1/grid_w_size_;
+  inv_grid_h_size_ = 1/grid_h_size_;
+  grid_x_num_ = static_cast<uint32_t>(std::ceil(image_w_ / grid_w_size_));
+  grid_y_num_ = static_cast<uint32_t>(std::ceil(image_h_ / grid_h_size_));
+  cache_size_ = grid_x_num_*grid_y_num_;
+
+  // for checking the views
+  // cx/fx
+  tan_h_x_ = camera_info.k.at(2)/camera_info.k.at(0);
+  // cy/fy
+  tan_h_y_ = camera_info.k.at(5)/camera_info.k.at(4);
+}
+
+void CameraProjection::initialize(){
+  if (unrectify_) {
+    if (use_approximation_) {
+      // create the cache with size of grid center
+      // store only xy position in float to reduce memory consumption
+      projection_cache_ = std::make_unique<PixelPos[]>(cache_size_);
+      initializeCache();
+
+      calcImageProjectedPoint = [this](const cv::Point3d & point3d, Eigen::Vector2d & projected_point) {
+        return this->calcRawImageProjectedPointWithApproximation(point3d, projected_point);
+      };
+    } else {
+      calcImageProjectedPoint = [this](const cv::Point3d & point3d, Eigen::Vector2d & projected_point) {
+        return this->calcRawImageProjectedPoint(point3d, projected_point);
+      };
+    }
+  } else {
+    calcImageProjectedPoint = [this](const cv::Point3d & point3d, Eigen::Vector2d & projected_point) {
+      return this->calcRectifiedImageProjectedPoint(point3d, projected_point);
+    };
+  }
+}
+
+void CameraProjection::initializeCache(){
+  // sample grid centers across the image height and width to precompute the projection
+  //
+  //   grid_size
+  //   /
+  //  v
+  //  |---|          w
+  //  0----------------->
+  // 0| . | . | . |
+  //  |___|___|___|
+  //  | . | . | . |
+  //  |    ^
+  // h|    |
+  //  v   grid center
+  //
+  // each pixel is rounded to the nearest of these grid centers
+  // edge pixels in the image are assigned to centers that lie outside of the image
+
+  for (float y = half_grid_h_size_; y < image_h_; y += grid_h_size_) {
+    for (float x = half_grid_w_size_; x < image_w_; x += grid_w_size_) {
+      const float qx = std::round((x-half_grid_w_size_)*inv_grid_w_size_)*grid_w_size_+half_grid_w_size_;
+      const float qy = std::round((y-half_grid_h_size_)*inv_grid_h_size_)*grid_h_size_+half_grid_h_size_;
+
+      const int grid_x = static_cast<int>(std::floor(qx / grid_w_size_));
+      const int grid_y = static_cast<int>(std::floor(qy / grid_h_size_));
+      const int index = (grid_y) * grid_x_num_ + grid_x;
+
+      // precompute projected point
+      cv::Point2d raw_image_point = camera_model_.unrectifyPoint(cv::Point2d(qx, qy));
+      projection_cache_[index] = PixelPos{static_cast<float>(raw_image_point.x), static_cast<float>(raw_image_point.y)};
+    }
+  }
+}
+
+
+/**
+ * @brief Calculate a projection of 3D point to rectified image plane 2D point.
+ * @return Return a boolean indicating whether the projected point is on the image plane.
+ */
+bool CameraProjection::calcRectifiedImageProjectedPoint(
+  const cv::Point3d & point3d, Eigen::Vector2d & projected_point
+){
+  const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d);
+
+  if (rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ || rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_){
+    return false;
+  } else {
+    projected_point << rectified_image_point.x, rectified_image_point.y;
+    return true;
+  }
+}
+
+/**
+ * @brief Calculate a projection of 3D point to raw image plane 2D point.
+ * @return Return a boolean indicating whether the projected point is on the image plane.
+ */
+bool CameraProjection::calcRawImageProjectedPoint(
+  const cv::Point3d & point3d, Eigen::Vector2d & projected_point
+){
+  const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d);
+  const cv::Point2d raw_image_point = camera_model_.unrectifyPoint(rectified_image_point);
+
+  if (rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ || rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_){
+    return false;
+  } else {
+    projected_point << raw_image_point.x, raw_image_point.y;
+    return true;
+  }
+}
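// (illustrative note, not part of the patch) A rough bound on the error of the
// approximate variant below: the rectified point is snapped to the nearest grid
// center, so the error before unrectification is at most half the grid diagonal,
// sqrt((w/2)^2 + (h/2)^2), i.e. about 0.71 px for the default 1.0 x 1.0 px grid;
// lens distortion can stretch this slightly on the raw image.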
+/**
+ * @brief Calculate a projection of 3D point to raw image plane 2D point with approximation.
+ * @return Return a boolean indicating whether the projected point is on the image plane.
+ */
+bool CameraProjection::calcRawImageProjectedPointWithApproximation(
+  const cv::Point3d & point3d, Eigen::Vector2d & projected_point
+){
+  const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d);
+
+  // round to a near grid center
+  const float qx = std::round((rectified_image_point.x-half_grid_w_size_)*inv_grid_w_size_)*grid_w_size_+half_grid_w_size_;
+  if (qx < 0.0 || qx >= image_w_) {
+    return false;
+  }
+  const float qy = std::round((rectified_image_point.y-half_grid_h_size_)*inv_grid_h_size_)*grid_h_size_+half_grid_h_size_;
+  if (qy < 0.0 || qy >= image_h_) {
+    return false;
+  }
+
+  const int grid_x = static_cast<int>(std::floor(qx / grid_w_size_));
+  const int grid_y = static_cast<int>(std::floor(qy / grid_h_size_));
+  const int index = grid_y * grid_x_num_ + grid_x;
+
+  projected_point << projection_cache_[index].x, projection_cache_[index].y;
+
+  return true;
+}
+
+sensor_msgs::msg::CameraInfo CameraProjection::getCameraInfo(){
+  return camera_info_;
+}
+
+bool CameraProjection::isOutsideHorizontalView(const float px, const float pz){
+  // assuming the points' origin is centered on the camera
+  return pz <= 0.0 || px > tan_h_x_ * pz || px < -tan_h_x_ * pz;
+}
+
+bool CameraProjection::isOutsideVerticalView(const float py, const float pz){
+  // assuming the points' origin is centered on the camera
+  return pz <= 0.0 || py > tan_h_y_ * pz || py < -tan_h_y_ * pz;
+}
+
+}  // namespace autoware::image_projection_based_fusion
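Cache memory is easy to estimate from the listing above: one `PixelPos` (two floats, 8 bytes) per grid cell. A hedged back-of-the-envelope check, where the image size is an example value rather than anything fixed by the patch:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
  // Example: a 1920x1080 camera with the default 1.0 px grid.
  const double grid_w = 1.0, grid_h = 1.0;
  const auto grid_x_num = static_cast<uint64_t>(std::ceil(1920.0 / grid_w));
  const auto grid_y_num = static_cast<uint64_t>(std::ceil(1080.0 / grid_h));
  // one PixelPos (two floats) per grid cell, as in the cache above
  const uint64_t bytes = grid_x_num * grid_y_num * 2 * sizeof(float);
  std::printf(
    "%llu cells, %.1f MiB per camera\n",
    static_cast<unsigned long long>(grid_x_num * grid_y_num), bytes / (1024.0 * 1024.0));
  // ~2.07 million cells, ~15.8 MiB; doubling the grid size cuts this by 4x
  return 0;
}
```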
" + "It has %zu elements.", + current_size); + if (current_size < rois_number_) { + approx_camera_projection_.resize(rois_number_); + for (std::size_t i = current_size; i < rois_number_; i++) { + approx_camera_projection_.at(i) = true; + } + } + } + approx_grid_w_size_ = declare_parameter("approximation_grid_width_size"); + approx_grid_h_size_ = declare_parameter("approximation_grid_height_size"); + // debugger if (declare_parameter("debug_mode", false)) { std::size_t image_buffer_size = @@ -142,9 +166,6 @@ FusionNode::FusionNode( time_keeper_ = std::make_shared(time_keeper); } - point_project_to_unrectified_image_ = - declare_parameter("point_project_to_unrectified_image"); - // initialize debug tool { using autoware::universe_utils::DebugPublisher; @@ -168,7 +189,18 @@ void FusionNode::cameraInfoCallback( const sensor_msgs::msg::CameraInfo::ConstSharedPtr input_camera_info_msg, const std::size_t camera_id) { - camera_info_map_[camera_id] = *input_camera_info_msg; + // create the CameraProjection when the camera info arrives for the first time + // assuming the camera info does not change while the node is running + if ( + camera_info_map_.find(camera_id) == camera_info_map_.end() && + checkCameraInfo(*input_camera_info_msg)) { + camera_projectors_.at(camera_id) = CameraProjection( + *input_camera_info_msg, approx_grid_w_size_, approx_grid_h_size_, + point_project_to_unrectified_image_.at(camera_id), approx_camera_projection_.at(camera_id)); + camera_projectors_.at(camera_id).initialize(); + + camera_info_map_[camera_id] = *input_camera_info_msg; + } } template @@ -274,7 +306,7 @@ void FusionNode::subCallback( fuseOnSingleImage( *input_msg, roi_i, *((cached_roi_msgs_.at(roi_i))[matched_stamp]), - camera_info_map_.at(roi_i), *output_msg); + *output_msg); (cached_roi_msgs_.at(roi_i)).erase(matched_stamp); is_fused_.at(roi_i) = true; @@ -346,9 +378,7 @@ void FusionNode::roiCallback( debugger_->clear(); } - fuseOnSingleImage( - *(cached_msg_.second), roi_i, *input_roi_msg, camera_info_map_.at(roi_i), - *(cached_msg_.second)); + fuseOnSingleImage(*(cached_msg_.second), roi_i, *input_roi_msg, *(cached_msg_.second)); is_fused_.at(roi_i) = true; if (debug_publisher_) { diff --git a/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp index 4d12de2685c95..d1c80ec052818 100644 --- a/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp @@ -161,13 +161,6 @@ PointPaintingFusionNode::PointPaintingFusionNode(const rclcpp::NodeOptions & opt sub_ = this->create_subscription( "~/input/pointcloud", rclcpp::SensorDataQoS().keep_last(3), sub_callback); - tan_h_.resize(rois_number_); - for (std::size_t roi_i = 0; roi_i < rois_number_; ++roi_i) { - auto fx = camera_info_map_[roi_i].k.at(0); - auto x0 = camera_info_map_[roi_i].k.at(2); - tan_h_[roi_i] = x0 / fx; - } - detection_class_remapper_.setParameters( allow_remapping_by_area_matrix, min_area_matrix, max_area_matrix); @@ -260,9 +253,7 @@ void PointPaintingFusionNode::preprocess(sensor_msgs::msg::PointCloud2 & painted void PointPaintingFusionNode::fuseOnSingleImage( __attribute__((unused)) const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, - __attribute__((unused)) const std::size_t image_id, - const DetectedObjectsWithFeature & input_roi_msg, - const sensor_msgs::msg::CameraInfo & camera_info, + const std::size_t 
diff --git a/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp
index 4d12de2685c95..d1c80ec052818 100644
--- a/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp
@@ -161,13 +161,6 @@ PointPaintingFusionNode::PointPaintingFusionNode(const rclcpp::NodeOptions & opt
   sub_ = this->create_subscription<sensor_msgs::msg::PointCloud2>(
     "~/input/pointcloud", rclcpp::SensorDataQoS().keep_last(3), sub_callback);

-  tan_h_.resize(rois_number_);
-  for (std::size_t roi_i = 0; roi_i < rois_number_; ++roi_i) {
-    auto fx = camera_info_map_[roi_i].k.at(0);
-    auto x0 = camera_info_map_[roi_i].k.at(2);
-    tan_h_[roi_i] = x0 / fx;
-  }
-
   detection_class_remapper_.setParameters(
     allow_remapping_by_area_matrix, min_area_matrix, max_area_matrix);

@@ -260,9 +253,7 @@ void PointPaintingFusionNode::preprocess(sensor_msgs::msg::PointCloud2 & painted

 void PointPaintingFusionNode::fuseOnSingleImage(
   __attribute__((unused)) const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg,
-  __attribute__((unused)) const std::size_t image_id,
-  const DetectedObjectsWithFeature & input_roi_msg,
-  const sensor_msgs::msg::CameraInfo & camera_info,
+  const std::size_t image_id, const DetectedObjectsWithFeature & input_roi_msg,
   sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg)
 {
   if (painted_pointcloud_msg.data.empty() || painted_pointcloud_msg.fields.empty()) {
@@ -276,8 +267,6 @@ void PointPaintingFusionNode::fuseOnSingleImage(
     return;
   }

-  if (!checkCameraInfo(camera_info)) return;
-
   std::unique_ptr<ScopedTimeTrack> st_ptr;
   if (time_keeper_) st_ptr = std::make_unique<ScopedTimeTrack>(__func__, *time_keeper_);

@@ -312,9 +301,6 @@ void PointPaintingFusionNode::fuseOnSingleImage(
       .offset;
   const auto class_offset = painted_pointcloud_msg.fields.at(4).offset;
   const auto p_step = painted_pointcloud_msg.point_step;
-  // projection matrix
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);

 Eigen::Vector3f point_lidar, point_camera;
 /** dc : don't care
@@ -346,24 +332,28 @@ dc | dc dc dc dc ||zc|
       p_y = point_camera.y();
       p_z = point_camera.z();

-      if (p_z <= 0.0 || p_x > (tan_h_.at(image_id) * p_z) || p_x < (-tan_h_.at(image_id) * p_z)) {
+      if (camera_projectors_[image_id].isOutsideHorizontalView(p_x, p_z)) {
         continue;
       }
+
       // project
-      Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
-        pinhole_camera_model, cv::Point3d(p_x, p_y, p_z), point_project_to_unrectified_image_);
-
-      // iterate 2d bbox
-      for (const auto & feature_object : objects) {
-        sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi;
-        // paint current point if it is inside bbox
-        int label2d = feature_object.object.classification.front().label;
-        if (!isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, 1.0)) {
-          // cppcheck-suppress invalidPointerCast
-          auto p_class = reinterpret_cast<float *>(&output[stride + class_offset]);
-          for (const auto & cls : isClassTable_) {
-            // add up the class values if the point belongs to multiple classes
-            *p_class = cls.second(label2d) ? (class_index_[cls.first] + *p_class) : *p_class;
+      Eigen::Vector2d projected_point;
+      if (camera_projectors_[image_id].calcImageProjectedPoint(
+            cv::Point3d(p_x, p_y, p_z), projected_point)) {
+        // iterate 2d bbox
+        for (const auto & feature_object : objects) {
+          sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi;
+          // paint current point if it is inside bbox
+          int label2d = feature_object.object.classification.front().label;
+          if (
+            !isUnknown(label2d) &&
+            isInsideBbox(projected_point.x(), projected_point.y(), roi, 1.0)) {
+            // cppcheck-suppress invalidPointerCast
+            auto p_class = reinterpret_cast<float *>(&output[stride + class_offset]);
+            for (const auto & cls : isClassTable_) {
+              // add up the class values if the point belongs to multiple classes
+              *p_class = cls.second(label2d) ? (class_index_[cls.first] + *p_class) : *p_class;
+            }
           }
         }
 #if 0
@@ -375,15 +365,15 @@
     }
   }

-  for (const auto & feature_object : input_roi_msg.feature_objects) {
-    debug_image_rois.push_back(feature_object.feature.roi);
-  }
-
   if (debugger_) {
     std::unique_ptr<ScopedTimeTrack> inner_st_ptr;
     if (time_keeper_)
      inner_st_ptr = std::make_unique<ScopedTimeTrack>("publish debug message", *time_keeper_);

+    for (const auto & feature_object : input_roi_msg.feature_objects) {
+      debug_image_rois.push_back(feature_object.feature.roi);
+    }
+
     debugger_->image_rois_ = debug_image_rois;
     debugger_->obstacle_points_ = debug_image_points;
     debugger_->publishImage(image_id, input_roi_msg.header.stamp);
diff --git a/perception/autoware_image_projection_based_fusion/src/roi_cluster_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
index 7520647544684..32db5319bb487 100644
--- a/perception/autoware_image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
@@ -95,16 +95,12 @@ void RoiClusterFusionNode::postprocess(DetectedObjectsWithFeature & output_clust

 void RoiClusterFusionNode::fuseOnSingleImage(
   const DetectedObjectsWithFeature & input_cluster_msg, const std::size_t image_id,
-  const DetectedObjectsWithFeature & input_roi_msg,
-  const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjectsWithFeature & output_cluster_msg)
+  const DetectedObjectsWithFeature & input_roi_msg, DetectedObjectsWithFeature & output_cluster_msg)
 {
-  if (!checkCameraInfo(camera_info)) return;
-
   std::unique_ptr<ScopedTimeTrack> st_ptr;
   if (time_keeper_) st_ptr = std::make_unique<ScopedTimeTrack>(__func__, *time_keeper_);

-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
+  const sensor_msgs::msg::CameraInfo & camera_info = camera_projectors_[image_id].getCameraInfo();

   // get transform from cluster frame id to camera optical frame id
   geometry_msgs::msg::TransformStamped transform_stamped;
@@ -152,18 +148,17 @@
         continue;
       }

-      Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
-        pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z),
-        point_project_to_unrectified_image_);
-      if (
-        0 <= static_cast<int>(projected_point.x()) &&
-        static_cast<int>(projected_point.x()) <= static_cast<int>(camera_info.width) - 1 &&
-        0 <= static_cast<int>(projected_point.y()) &&
-        static_cast<int>(projected_point.y()) <= static_cast<int>(camera_info.height) - 1) {
-        min_x = std::min(static_cast<int>(projected_point.x()), min_x);
-        min_y = std::min(static_cast<int>(projected_point.y()), min_y);
-        max_x = std::max(static_cast<int>(projected_point.x()), max_x);
-        max_y = std::max(static_cast<int>(projected_point.y()), max_y);
+      Eigen::Vector2d projected_point;
+      if (camera_projectors_[image_id].calcImageProjectedPoint(
+            cv::Point3d(*iter_x, *iter_y, *iter_z), projected_point)) {
+        const int px = static_cast<int>(projected_point.x());
+        const int py = static_cast<int>(projected_point.y());
+
+        min_x = std::min(px, min_x);
+        min_y = std::min(py, min_y);
+        max_x = std::max(px, max_x);
+        max_y = std::max(py, max_y);
+
         projected_points.push_back(projected_point);
         if (debugger_) debugger_->obstacle_points_.push_back(projected_point);
       }
     }
diff --git a/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
index 035bc259ab73c..5e22a95973f29 100644
--- a/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
@@ -84,14 +84,11 @@ void RoiDetectedObjectFusionNode::preprocess(DetectedObjects & output_msg)
 void RoiDetectedObjectFusionNode::fuseOnSingleImage(
   const DetectedObjects & input_object_msg, const std::size_t image_id,
   const DetectedObjectsWithFeature & input_roi_msg,
-  const sensor_msgs::msg::CameraInfo & camera_info,
   DetectedObjects & output_object_msg __attribute__((unused)))
 {
   std::unique_ptr<ScopedTimeTrack> st_ptr;
   if (time_keeper_) st_ptr = std::make_unique<ScopedTimeTrack>(__func__, *time_keeper_);

-  if (!checkCameraInfo(camera_info)) return;
-
   Eigen::Affine3d object2camera_affine;
   {
     const auto transform_stamped_optional = getTransformStamped(
@@ -103,12 +100,8 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
     object2camera_affine = transformToEigen(transform_stamped_optional.value().transform);
   }

-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
-
-  const auto object_roi_map = generateDetectedObjectRoIs(
-    input_object_msg, static_cast<double>(camera_info.width),
-    static_cast<double>(camera_info.height), object2camera_affine, pinhole_camera_model);
+  const auto object_roi_map = generateDetectedObjectRoIs(
+    input_object_msg, image_id, object2camera_affine);
   fuseObjectsOnImage(input_object_msg, input_roi_msg.feature_objects, object_roi_map);

   if (debugger_) {
@@ -122,9 +115,8 @@
 std::map<std::size_t, DetectedObjectWithFeature>
 RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
-  const DetectedObjects & input_object_msg, const double image_width, const double image_height,
-  const Eigen::Affine3d & object2camera_affine,
-  const image_geometry::PinholeCameraModel & pinhole_camera_model)
+  const DetectedObjects & input_object_msg, const std::size_t & image_id,
+  const Eigen::Affine3d & object2camera_affine)
 {
   std::unique_ptr<ScopedTimeTrack> st_ptr;
   if (time_keeper_) st_ptr = std::make_unique<ScopedTimeTrack>(__func__, *time_keeper_);
@@ -139,6 +131,8 @@
     return object_roi_map;
   }
   const auto & passthrough_object_flags = passthrough_object_flags_map_.at(timestamp_nsec);
+  const sensor_msgs::msg::CameraInfo & camera_info = camera_projectors_[image_id].getCameraInfo();
+
   for (std::size_t obj_i = 0; obj_i < input_object_msg.objects.size(); ++obj_i) {
     std::vector<Eigen::Vector3d> vertices_camera_coord;
     const auto & object = input_object_msg.objects.at(obj_i);
@@ -158,25 +152,25 @@
       transformPoints(vertices, object2camera_affine, vertices_camera_coord);
     }

-    double min_x(image_width), min_y(image_height), max_x(0.0), max_y(0.0);
+    double min_x(camera_info.width), min_y(camera_info.height), max_x(0.0), max_y(0.0);
     std::size_t point_on_image_cnt = 0;
     for (const auto & point : vertices_camera_coord) {
       if (point.z() <= 0.0) {
         continue;
       }

-      Eigen::Vector2d proj_point = calcRawImageProjectedPoint(
-        pinhole_camera_model, cv::Point3d(point.x(), point.y(), point.z()),
-        point_project_to_unrectified_image_);
+      Eigen::Vector2d proj_point;
+      if (camera_projectors_[image_id].calcImageProjectedPoint(
+            cv::Point3d(point.x(), point.y(), point.z()), proj_point
+          )){
+        const double px = proj_point.x();
+        const double py = proj_point.y();

-      min_x = std::min(proj_point.x(), min_x);
-      min_y = std::min(proj_point.y(), min_y);
-      max_x = std::max(proj_point.x(), max_x);
-      max_y = std::max(proj_point.y(), max_y);
+        min_x = std::min(px, min_x);
+        min_y = std::min(py, min_y);
+        max_x = std::max(px, max_x);
+        max_y = std::max(py, max_y);

-      if (
-        proj_point.x() >= 0 && proj_point.x() < image_width && proj_point.y() >= 0 &&
-        proj_point.y() < image_height) {
         point_on_image_cnt++;

         if (debugger_) {
diff --git a/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
index 206a5f77a0235..fcd3833a5c938 100644
--- a/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
@@ -87,9 +87,8 @@ void RoiPointCloudFusionNode::postprocess(
 }
 void RoiPointCloudFusionNode::fuseOnSingleImage(
   const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg,
-  __attribute__((unused)) const std::size_t image_id,
+  const std::size_t image_id,
   const DetectedObjectsWithFeature & input_roi_msg,
-  const sensor_msgs::msg::CameraInfo & camera_info,
   __attribute__((unused)) sensor_msgs::msg::PointCloud2 & output_pointcloud_msg)
 {
   std::unique_ptr<ScopedTimeTrack> st_ptr;
@@ -98,7 +97,6 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
   if (input_pointcloud_msg.data.empty()) {
     return;
   }
-  if (!checkCameraInfo(camera_info)) return;

   std::vector<DetectedObjectWithFeature> output_objs;
   std::vector<sensor_msgs::msg::RegionOfInterest> debug_image_rois;
@@ -122,10 +120,6 @@
     return;
   }

-  // transform pointcloud to camera optical frame id
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
-
   geometry_msgs::msg::TransformStamped transform_stamped;
   {
     const auto transform_stamped_optional = getTransformStamped(
@@ -136,10 +130,10 @@
     }
     transform_stamped = transform_stamped_optional.value();
   }
-  int point_step = input_pointcloud_msg.point_step;
-  int x_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "x")].offset;
-  int y_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "y")].offset;
-  int z_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "z")].offset;
+  const int point_step = input_pointcloud_msg.point_step;
+  const int x_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "x")].offset;
+  const int y_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "y")].offset;
+  const int z_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "z")].offset;

   sensor_msgs::msg::PointCloud2 transformed_cloud;
   tf2::doTransform(input_pointcloud_msg, transformed_cloud, transform_stamped);
@@ -164,33 +158,37 @@
     if (transformed_z <= 0.0) {
       continue;
     }
-    Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
-      pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z),
-      point_project_to_unrectified_image_);
-    for (std::size_t i = 0; i < output_objs.size(); ++i) {
-      auto & feature_obj = output_objs.at(i);
-      const auto & check_roi = feature_obj.feature.roi;
-      auto & cluster = clusters.at(i);
-
-      if (
-        clusters_data_size.at(i) >=
-        static_cast<size_t>(max_cluster_size_) * static_cast<size_t>(point_step)) {
-        continue;
-      }
-      if (
-        check_roi.x_offset <= projected_point.x() && check_roi.y_offset <= projected_point.y() &&
-        check_roi.x_offset + check_roi.width >= projected_point.x() &&
-        check_roi.y_offset + check_roi.height >= projected_point.y()) {
-        std::memcpy(
-          &cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset], point_step);
-        clusters_data_size.at(i) += point_step;
+
+    Eigen::Vector2d projected_point;
+    if (camera_projectors_[image_id].calcImageProjectedPoint(
+          cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point)
+    ) {
+      for (std::size_t i = 0; i < output_objs.size(); ++i) {
+        auto & feature_obj = output_objs.at(i);
+        const auto & check_roi = feature_obj.feature.roi;
+        auto & cluster = clusters.at(i);
+
+        const double px = projected_point.x();
+        const double py = projected_point.y();
+
+        if (
+          clusters_data_size.at(i) >=
+          static_cast<size_t>(max_cluster_size_) * static_cast<size_t>(point_step)) {
+          continue;
+        }
+        if (
+          check_roi.x_offset <= px && check_roi.y_offset <= py &&
+          check_roi.x_offset + check_roi.width >= px &&
+          check_roi.y_offset + check_roi.height >= py) {
+          std::memcpy(
+            &cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset],
+            point_step);
+          clusters_data_size.at(i) += point_step;
+        }
       }
-    }
-    if (debugger_) {
-      // add all points inside image to debug
-      if (
-        projected_point.x() > 0 && projected_point.x() < camera_info.width &&
-        projected_point.y() > 0 && projected_point.y() < camera_info.height) {
+
+      if (debugger_) {
+        // add all points inside image to debug
         debug_image_points.push_back(projected_point);
       }
     }
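The hunk above walks the `PointCloud2` byte buffer directly rather than converting it. A hedged, self-contained sketch of that iteration pattern (in the real code the offsets come from `msg.fields` via `pcl::getFieldIndex`; the helper name here is illustrative):

```cpp
#include <cstring>

#include <sensor_msgs/msg/point_cloud2.hpp>

// Read one float field of one point from the raw PointCloud2 buffer:
// each point occupies point_step bytes, and each field sits at a fixed
// byte offset inside that stride.
float read_field(const sensor_msgs::msg::PointCloud2 & msg, size_t point_index, int field_offset)
{
  float value;
  std::memcpy(&value, &msg.data[point_index * msg.point_step + field_offset], sizeof(float));
  return value;
}
```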
diff --git a/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
index afeff213c0afe..4468c087d2250 100644
--- a/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
@@ -87,9 +87,8 @@ void SegmentPointCloudFusionNode::postprocess(PointCloud2 & pointcloud_msg)
 }

 void SegmentPointCloudFusionNode::fuseOnSingleImage(
-  const PointCloud2 & input_pointcloud_msg, __attribute__((unused)) const std::size_t image_id,
-  [[maybe_unused]] const Image & input_mask, __attribute__((unused)) const CameraInfo & camera_info,
-  __attribute__((unused)) PointCloud2 & output_cloud)
+  const PointCloud2 & input_pointcloud_msg, const std::size_t image_id,
+  [[maybe_unused]] const Image & input_mask, __attribute__((unused)) PointCloud2 & output_cloud)
 {
   std::unique_ptr<ScopedTimeTrack> st_ptr;
   if (time_keeper_) st_ptr = std::make_unique<ScopedTimeTrack>(__func__, *time_keeper_);
@@ -97,10 +96,11 @@
   if (input_pointcloud_msg.data.empty()) {
     return;
   }
-  if (!checkCameraInfo(camera_info)) return;
   if (input_mask.height == 0 || input_mask.width == 0) {
     return;
   }
+
+  const sensor_msgs::msg::CameraInfo & camera_info = camera_projectors_[image_id].getCameraInfo();
   std::vector<uint8_t> mask_data(input_mask.data.begin(), input_mask.data.end());
   cv::Mat mask = perception_utils::runLengthDecoder(mask_data, input_mask.height, input_mask.width);
@@ -115,8 +115,6 @@
   const int orig_height = camera_info.height;
   // resize mask to the same size as the camera image
   cv::resize(mask, mask, cv::Size(orig_width, orig_height), 0, 0, cv::INTER_NEAREST);
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);

   geometry_msgs::msg::TransformStamped transform_stamped;
   // transform pointcloud from frame id to camera optical frame id
@@ -151,13 +149,10 @@
       continue;
     }

-    Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
-      pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z),
-      point_project_to_unrectified_image_);
-
-    bool is_inside_image = projected_point.x() > 0 && projected_point.x() < camera_info.width &&
-                           projected_point.y() > 0 && projected_point.y() < camera_info.height;
-    if (!is_inside_image) {
+    Eigen::Vector2d projected_point;
+    if (!camera_projectors_[image_id].calcImageProjectedPoint(
+          cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point
+        )){
       continue;
     }
diff --git a/perception/autoware_image_projection_based_fusion/src/utils/utils.cpp b/perception/autoware_image_projection_based_fusion/src/utils/utils.cpp
index 338a5605e5a79..4456d25b18167 100644
--- a/perception/autoware_image_projection_based_fusion/src/utils/utils.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/utils/utils.cpp
@@ -44,20 +44,6 @@ bool checkCameraInfo(const sensor_msgs::msg::CameraInfo & camera_info)
   return true;
 }

-Eigen::Vector2d calcRawImageProjectedPoint(
-  const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d,
-  const bool & unrectify)
-{
-  const cv::Point2d rectified_image_point = pinhole_camera_model.project3dToPixel(point3d);
-
-  if (!unrectify) {
-    return Eigen::Vector2d(rectified_image_point.x, rectified_image_point.y);
-  }
-  const cv::Point2d raw_image_point = pinhole_camera_model.unrectifyPoint(rectified_image_point);
-
-  return Eigen::Vector2d(raw_image_point.x, raw_image_point.y);
-}
-
 std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
   const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
   const std::string & source_frame_id, const rclcpp::Time & time)
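Between the two patches, a hedged sketch of how the new class could be sanity-checked. This test is not part of the patch series; the camera parameters are made-up plumb-bob values, and the tolerance reflects the half-grid error bound discussed earlier:

```cpp
#include <gtest/gtest.h>

#include "autoware/image_projection_based_fusion/camera_projection.hpp"

using autoware::image_projection_based_fusion::CameraProjection;

// Hypothetical fixture: a small camera with mild distortion.
sensor_msgs::msg::CameraInfo makeCameraInfo()
{
  sensor_msgs::msg::CameraInfo info;
  info.width = 640;
  info.height = 480;
  info.distortion_model = "plumb_bob";
  info.d = {0.1, -0.05, 0.0, 0.0, 0.0};
  info.k = {400.0, 0.0, 320.0, 0.0, 400.0, 240.0, 0.0, 0.0, 1.0};
  info.r = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
  info.p = {400.0, 0.0, 320.0, 0.0, 0.0, 400.0, 240.0, 0.0, 0.0, 0.0, 1.0, 0.0};
  return info;
}

TEST(CameraProjection, ApproximationStaysNearExact)
{
  const auto info = makeCameraInfo();
  CameraProjection exact(info, 1.0, 1.0, true, false);   // unrectify, no cache
  CameraProjection approx(info, 1.0, 1.0, true, true);   // unrectify, cached grid
  exact.initialize();
  approx.initialize();

  const cv::Point3d p(0.2, -0.1, 5.0);
  Eigen::Vector2d e, a;
  ASSERT_TRUE(exact.calcImageProjectedPoint(p, e));
  ASSERT_TRUE(approx.calcImageProjectedPoint(p, a));
  EXPECT_NEAR(e.x(), a.x(), 1.0);
  EXPECT_NEAR(e.y(), a.y(), 1.0);
}
```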
From 3822a966073fb2a4c6c419486ca02d16bf71b728 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 16 Dec 2024 07:53:56 +0000
Subject: [PATCH 2/2] style(pre-commit): autofix

---
 .../camera_projection.hpp                     | 19 ++---
 .../fusion_node.hpp                           |  2 +-
 .../roi_detected_object_fusion/node.hpp       |  3 +-
 .../roi_pointcloud_fusion/node.hpp            |  3 +-
 .../src/camera_projection.cpp                 | 83 ++++++++++++-------
 .../src/fusion_node.cpp                       |  3 +-
 .../src/pointpainting_fusion/node.cpp         |  3 +-
 .../src/roi_detected_object_fusion/node.cpp   |  7 +-
 .../src/roi_pointcloud_fusion/node.cpp        | 15 ++--
 .../segmentation_pointcloud_fusion/node.cpp   |  3 +-
 10 files changed, 78 insertions(+), 63 deletions(-)

diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp
index cc66dcd15ce62..2bcef9b6fa56a 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp
@@ -18,13 +18,11 @@
 #define EIGEN_MPL2_ONLY

 #include
-
 #include
+#include
+#include

 #include
-#include
-
 #include
 #include
@@ -42,12 +40,9 @@ class CameraProjection
 {
 public:
   explicit CameraProjection(
-    const sensor_msgs::msg::CameraInfo & camera_info,
-    const float grid_width, const float grid_height,
-    const bool unrectify,
-    const bool use_approximation
-  );
-  CameraProjection(): grid_w_size_(1.0), grid_h_size_(1.0), unrectify_(false) {};
+    const sensor_msgs::msg::CameraInfo & camera_info, const float grid_width,
+    const float grid_height, const bool unrectify, const bool use_approximation);
+  CameraProjection() : grid_w_size_(1.0), grid_h_size_(1.0), unrectify_(false) {};
   void initialize();
   std::function<bool(const cv::Point3d &, Eigen::Vector2d &)> calcImageProjectedPoint;
   sensor_msgs::msg::CameraInfo getCameraInfo();
@@ -55,9 +50,11 @@ class CameraProjection
   bool isOutsideVerticalView(const float py, const float pz);

 protected:
-  bool calcRectifiedImageProjectedPoint(const cv::Point3d & point3d, Eigen::Vector2d & projected_point);
+  bool calcRectifiedImageProjectedPoint(
+    const cv::Point3d & point3d, Eigen::Vector2d & projected_point);
   bool calcRawImageProjectedPoint(const cv::Point3d & point3d, Eigen::Vector2d & projected_point);
-  bool calcRawImageProjectedPointWithApproximation(const cv::Point3d & point3d, Eigen::Vector2d & projected_point);
+  bool calcRawImageProjectedPointWithApproximation(
+    const cv::Point3d & point3d, Eigen::Vector2d & projected_point);
   void initializeCache();

   sensor_msgs::msg::CameraInfo camera_info_;
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp
index d522ef1287847..82db64f7d7d52 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp
@@ -56,8 +56,8 @@ using sensor_msgs::msg::PointCloud2;
 using tier4_perception_msgs::msg::DetectedObjectsWithFeature;
 using tier4_perception_msgs::msg::DetectedObjectWithFeature;
 using PointCloud = pcl::PointCloud<pcl::PointXYZ>;
-using autoware_perception_msgs::msg::ObjectClassification;
 using autoware::image_projection_based_fusion::CameraProjection;
+using autoware_perception_msgs::msg::ObjectClassification;

 template <class TargetMsg3D, class ObjType, class Msg2D>
 class FusionNode : public rclcpp::Node
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
index f355fd2129b24..43ec134168ef9 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
@@ -42,8 +42,7 @@ class RoiDetectedObjectFusionNode
   void fuseOnSingleImage(
     const DetectedObjects & input_object_msg, const std::size_t image_id,
-    const DetectedObjectsWithFeature & input_roi_msg,
-    DetectedObjects & output_object_msg) override;
+    const DetectedObjectsWithFeature & input_roi_msg, DetectedObjects & output_object_msg) override;

   std::map<std::size_t, DetectedObjectWithFeature> generateDetectedObjectRoIs(
     const DetectedObjects & input_object_msg, const std::size_t & image_id,
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
index 0c454aebac42c..2f2c8222e196f 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
@@ -49,8 +49,7 @@
 class RoiPointCloudFusionNode
   void fuseOnSingleImage(
     const PointCloud2 & input_pointcloud_msg, const std::size_t image_id,
-    const DetectedObjectsWithFeature & input_roi_msg,
-    PointCloud2 & output_pointcloud_msg) override;
+    const DetectedObjectsWithFeature & input_roi_msg, PointCloud2 & output_pointcloud_msg) override;

   bool out_of_scope(const DetectedObjectWithFeature & obj) override;
 };
diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp
index af17d85989d35..71318daa6ca80 100644
--- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp
@@ -29,8 +29,13 @@ namespace autoware::image_projection_based_fusion
 {
 CameraProjection::CameraProjection(
   const sensor_msgs::msg::CameraInfo & camera_info, const float grid_width, const float grid_height,
-  const bool unrectify, const bool use_approximation=false
-): camera_info_(camera_info), grid_w_size_(grid_width), grid_h_size_(grid_height), unrectify_(unrectify), use_approximation_(use_approximation){
+  const bool unrectify, const bool use_approximation = false)
+: camera_info_(camera_info),
+  grid_w_size_(grid_width),
+  grid_h_size_(grid_height),
+  unrectify_(unrectify),
+  use_approximation_(use_approximation)
+{
   if (grid_w_size_ <= 0.0 || grid_h_size_ <= 0.0) {
     throw std::runtime_error("Both grid_width and grid_height must be > 0.0");
   }
@@ -45,20 +50,21 @@ CameraProjection::CameraProjection(
   // for shifting to the grid center
   half_grid_w_size_ = grid_w_size_ / 2.0;
   half_grid_h_size_ = grid_h_size_ / 2.0;
-  inv_grid_w_size_ = 1/grid_w_size_;
-  inv_grid_h_size_ = 1/grid_h_size_;
+  inv_grid_w_size_ = 1 / grid_w_size_;
+  inv_grid_h_size_ = 1 / grid_h_size_;
   grid_x_num_ = static_cast<uint32_t>(std::ceil(image_w_ / grid_w_size_));
   grid_y_num_ = static_cast<uint32_t>(std::ceil(image_h_ / grid_h_size_));
-  cache_size_ = grid_x_num_*grid_y_num_;
+  cache_size_ = grid_x_num_ * grid_y_num_;

   // for checking the views
   // cx/fx
-  tan_h_x_ = camera_info.k.at(2)/camera_info.k.at(0);
+  tan_h_x_ = camera_info.k.at(2) / camera_info.k.at(0);
   // cy/fy
-  tan_h_y_ = camera_info.k.at(5)/camera_info.k.at(4);
+  tan_h_y_ = camera_info.k.at(5) / camera_info.k.at(4);
 }

-void CameraProjection::initialize(){
+void CameraProjection::initialize()
+{
   if (unrectify_) {
     if (use_approximation_) {
       // create the cache with size of grid center
       // store only xy position in float to reduce memory consumption
       projection_cache_ = std::make_unique<PixelPos[]>(cache_size_);
       initializeCache();

-      calcImageProjectedPoint = [this](const cv::Point3d & point3d, Eigen::Vector2d & projected_point) {
+      calcImageProjectedPoint = [this](
+                                  const cv::Point3d & point3d, Eigen::Vector2d & projected_point) {
         return this->calcRawImageProjectedPointWithApproximation(point3d, projected_point);
       };
     } else {
-      calcImageProjectedPoint = [this](const cv::Point3d & point3d, Eigen::Vector2d & projected_point) {
+      calcImageProjectedPoint = [this](
+                                  const cv::Point3d & point3d, Eigen::Vector2d & projected_point) {
         return this->calcRawImageProjectedPoint(point3d, projected_point);
       };
     }
   } else {
-    calcImageProjectedPoint = [this](const cv::Point3d & point3d, Eigen::Vector2d & projected_point) {
+    calcImageProjectedPoint = [this](
+                                const cv::Point3d & point3d, Eigen::Vector2d & projected_point) {
       return this->calcRectifiedImageProjectedPoint(point3d, projected_point);
     };
   }
 }

-void CameraProjection::initializeCache(){
+void
 CameraProjection::initializeCache()
+{
   // sample grid centers across the image height and width to precompute the projection
@@ -101,31 +111,35 @@
   for (float y = half_grid_h_size_; y < image_h_; y += grid_h_size_) {
     for (float x = half_grid_w_size_; x < image_w_; x += grid_w_size_) {
-      const float qx = std::round((x-half_grid_w_size_)*inv_grid_w_size_)*grid_w_size_+half_grid_w_size_;
-      const float qy = std::round((y-half_grid_h_size_)*inv_grid_h_size_)*grid_h_size_+half_grid_h_size_;
+      const float qx =
+        std::round((x - half_grid_w_size_) * inv_grid_w_size_) * grid_w_size_ + half_grid_w_size_;
+      const float qy =
+        std::round((y - half_grid_h_size_) * inv_grid_h_size_) * grid_h_size_ + half_grid_h_size_;

       const int grid_x = static_cast<int>(std::floor(qx / grid_w_size_));
       const int grid_y = static_cast<int>(std::floor(qy / grid_h_size_));
-      const int index = (grid_y) * grid_x_num_ + grid_x;
+      const int index = (grid_y)*grid_x_num_ + grid_x;

       // precompute projected point
       cv::Point2d raw_image_point = camera_model_.unrectifyPoint(cv::Point2d(qx, qy));
-      projection_cache_[index] = PixelPos{static_cast<float>(raw_image_point.x), static_cast<float>(raw_image_point.y)};
+      projection_cache_[index] =
+        PixelPos{static_cast<float>(raw_image_point.x), static_cast<float>(raw_image_point.y)};
     }
   }
 }

 /**
  * @brief Calculate a projection of 3D point to rectified image plane 2D point.
  * @return Return a boolean indicating whether the projected point is on the image plane.
  */
 bool CameraProjection::calcRectifiedImageProjectedPoint(
-  const cv::Point3d & point3d, Eigen::Vector2d & projected_point
-){
+  const cv::Point3d & point3d, Eigen::Vector2d & projected_point)
+{
   const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d);

-  if (rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ || rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_){
+  if (
+    rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ ||
+    rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_) {
     return false;
   } else {
     projected_point << rectified_image_point.x, rectified_image_point.y;
@@ ...
 bool CameraProjection::calcRawImageProjectedPoint(
-  const cv::Point3d & point3d, Eigen::Vector2d & projected_point
-){
+  const cv::Point3d & point3d, Eigen::Vector2d & projected_point)
+{
   const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d);
   const cv::Point2d raw_image_point = camera_model_.unrectifyPoint(rectified_image_point);

-  if (rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ || rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_){
+  if (
+    rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ ||
+    rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_) {
     return false;
   } else {
     projected_point << raw_image_point.x, raw_image_point.y;
@@ -156,16 +172,20 @@ bool CameraProjection::calcRawImageProjectedPoint(
  * @return Return a boolean indicating whether the projected point is on the image plane.
@@ -156,16 +172,20 @@ bool CameraProjection::calcRawImageProjectedPoint(
  * @return Return a boolean indicating whether the projected point is on the image plane.
  */
 bool CameraProjection::calcRawImageProjectedPointWithApproximation(
-  const cv::Point3d & point3d, Eigen::Vector2d & projected_point
-){
+  const cv::Point3d & point3d, Eigen::Vector2d & projected_point)
+{
   const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d);
 
   // round to a near grid center
-  const float qx = std::round((rectified_image_point.x-half_grid_w_size_)*inv_grid_w_size_)*grid_w_size_+half_grid_w_size_;
+  const float qx =
+    std::round((rectified_image_point.x - half_grid_w_size_) * inv_grid_w_size_) * grid_w_size_ +
+    half_grid_w_size_;
   if (qx < 0.0 || qx >= image_w_) {
     return false;
   }
-  const float qy = std::round((rectified_image_point.y-half_grid_h_size_)*inv_grid_h_size_)*grid_h_size_+half_grid_h_size_;
+  const float qy =
+    std::round((rectified_image_point.y - half_grid_h_size_) * inv_grid_h_size_) * grid_h_size_ +
+    half_grid_h_size_;
   if (qy < 0.0 || qy >= image_h_) {
     return false;
   }
@@ -179,16 +199,19 @@ bool CameraProjection::calcRawImageProjectedPointWithApproximation(
   return true;
 }
 
-sensor_msgs::msg::CameraInfo CameraProjection::getCameraInfo(){
+sensor_msgs::msg::CameraInfo CameraProjection::getCameraInfo()
+{
   return camera_info_;
 }
 
-bool CameraProjection::isOutsideHorizontalView(const float px, const float pz){
+bool CameraProjection::isOutsideHorizontalView(const float px, const float pz)
+{
   // assuming the points' origin is centered on the camera
   return pz <= 0.0 || px > tan_h_x_ * pz || px < -tan_h_x_ * pz;
 }
 
-bool CameraProjection::isOutsideVerticalView(const float py, const float pz){
+bool CameraProjection::isOutsideVerticalView(const float py, const float pz)
+{
   // assuming the points' origin is centered on the camera
   return pz <= 0.0 || py > tan_h_y_ * pz || py < -tan_h_y_ * pz;
 }
diff --git a/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp b/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp
index 48fdfc74ae32b..e3dc710bda1ba 100644
--- a/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp
@@ -305,8 +305,7 @@ void FusionNode::subCallback(
         }
 
         fuseOnSingleImage(
-          *input_msg, roi_i, *((cached_roi_msgs_.at(roi_i))[matched_stamp]),
-          *output_msg);
+          *input_msg, roi_i, *((cached_roi_msgs_.at(roi_i))[matched_stamp]), *output_msg);
         (cached_roi_msgs_.at(roi_i)).erase(matched_stamp);
         is_fused_.at(roi_i) = true;
 
diff --git a/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp
index d1c80ec052818..2837bac458541 100644
--- a/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp
@@ -346,8 +346,7 @@ dc | dc dc dc dc ||zc|
       // paint current point if it is inside bbox
       int label2d = feature_object.object.classification.front().label;
       if (
-        !isUnknown(label2d) &&
-        isInsideBbox(projected_point.x(), projected_point.y(), roi, 1.0)) {
+        !isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, 1.0)) {
         // cppcheck-suppress invalidPointerCast
         auto p_class = reinterpret_cast<float *>(&output[stride + class_offset]);
         for (const auto & cls : isClassTable_) {
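The `isOutsideHorizontalView` / `isOutsideVerticalView` checks in `camera_projection.cpp` above discard points before any projection work. Since a pinhole camera maps `u = fx * px / pz + cx`, the condition `px > (cx / fx) * pz` is equivalent to `u > 2 * cx`, i.e. roughly past the right image edge when the principal point sits near the image center. A sketch with made-up intrinsics (`fx`, `cx` below are illustrative, not from any calibration):

```cpp
#include <cstdio>

// Half-angle view test: tan_h_x = cx / fx, a point (px, pz) in the camera
// frame (z forward) is outside the horizontal view when |px| > tan_h_x * pz.
int main()
{
  const double fx = 1000.0, cx = 960.0;  // assumed intrinsics (~1920 px wide image)
  const double tan_h_x = cx / fx;

  const double px = 2.0, pz = 1.0;  // sample camera-frame point
  const bool outside = pz <= 0.0 || px > tan_h_x * pz || px < -tan_h_x * pz;

  // where it would land if projected anyway: u = 1000 * 2 / 1 + 960 = 2960
  const double u = fx * px / pz + cx;
  std::printf("outside=%d u=%.0f (image width ~ %.0f)\n", outside, u, 2 * cx);
}
```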
diff --git a/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
index 5e22a95973f29..b088f1e1c377d 100644
--- a/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
@@ -100,8 +100,8 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
     object2camera_affine = transformToEigen(transform_stamped_optional.value().transform);
   }
 
-  const auto object_roi_map = generateDetectedObjectRoIs(
-    input_object_msg, image_id, object2camera_affine);
+  const auto object_roi_map =
+    generateDetectedObjectRoIs(input_object_msg, image_id, object2camera_affine);
   fuseObjectsOnImage(input_object_msg, input_roi_msg.feature_objects, object_roi_map);
 
   if (debugger_) {
@@ -161,8 +161,7 @@ RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
 
     Eigen::Vector2d proj_point;
     if (camera_projectors_[image_id].calcImageProjectedPoint(
-          cv::Point3d(point.x(), point.y(), point.z()), proj_point
-        )){
+          cv::Point3d(point.x(), point.y(), point.z()), proj_point)) {
       const double px = proj_point.x();
       const double py = proj_point.y();
 
diff --git a/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
index fcd3833a5c938..998072d87774e 100644
--- a/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
@@ -86,8 +86,7 @@ void RoiPointCloudFusionNode::postprocess(
   }
 }
 void RoiPointCloudFusionNode::fuseOnSingleImage(
-  const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg,
-  const std::size_t image_id,
+  const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const std::size_t image_id,
   const DetectedObjectsWithFeature & input_roi_msg,
   __attribute__((unused)) sensor_msgs::msg::PointCloud2 & output_pointcloud_msg)
 {
@@ -131,9 +130,12 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
     transform_stamped = transform_stamped_optional.value();
   }
   const int point_step = input_pointcloud_msg.point_step;
-  const int x_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "x")].offset;
-  const int y_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "y")].offset;
-  const int z_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "z")].offset;
+  const int x_offset =
+    input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "x")].offset;
+  const int y_offset =
+    input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "y")].offset;
+  const int z_offset =
+    input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "z")].offset;
   sensor_msgs::msg::PointCloud2 transformed_cloud;
   tf2::doTransform(input_pointcloud_msg, transformed_cloud, transform_stamped);
 
@@ -161,8 +163,7 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
     Eigen::Vector2d projected_point;
     if (camera_projectors_[image_id].calcImageProjectedPoint(
-          cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point)
-    ) {
+          cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point)) {
       for (std::size_t i = 0; i < output_objs.size(); ++i) {
         auto & feature_obj = output_objs.at(i);
         const auto & check_roi = feature_obj.feature.roi;
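The `roi_pointcloud_fusion` hunk above reads XYZ straight out of the `PointCloud2` byte buffer via `point_step` and per-field offsets rather than converting the whole cloud. A self-contained sketch of that access pattern; the 16-byte stride and the 0/4/8 offsets are assumed for the demo, whereas the real code queries them from the message's field list with `pcl::getFieldIndex`:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Reading XYZ from a PointCloud2-style byte buffer using per-field offsets.
int main()
{
  const int point_step = 16;                        // assumed stride per point
  const int x_offset = 0, y_offset = 4, z_offset = 8;  // assumed field offsets

  std::vector<std::uint8_t> data(2 * point_step);   // two points, second left zeroed
  const float p0[3] = {1.0f, 2.0f, 3.0f};
  std::memcpy(data.data(), p0, sizeof(p0));         // write the first point

  for (std::size_t off = 0; off < data.size(); off += point_step) {
    float x, y, z;
    std::memcpy(&x, &data[off + x_offset], sizeof(float));
    std::memcpy(&y, &data[off + y_offset], sizeof(float));
    std::memcpy(&z, &data[off + z_offset], sizeof(float));
    std::printf("point: %.1f %.1f %.1f\n", x, y, z);
  }
}
```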
diff --git a/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
index 4468c087d2250..486ae291abc6a 100644
--- a/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
@@ -151,8 +151,7 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
 
     Eigen::Vector2d projected_point;
     if (!camera_projectors_[image_id].calcImageProjectedPoint(
-          cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point
-        )){
+          cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point)) {
       continue;
     }