diff --git a/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_detected_object_fusion/node.hpp b/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
index 7d7132309fb91..f65ff4ad322a5 100644
--- a/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
+++ b/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
@@ -18,6 +18,8 @@
 #include "image_projection_based_fusion/fusion_node.hpp"
 #include "tier4_autoware_utils/ros/debug_publisher.hpp"
 
+#include <image_geometry/pinhole_camera_model.h>
+
 #include "autoware_auto_perception_msgs/msg/object_classification.hpp"
 
 #include
@@ -45,7 +47,8 @@ class RoiDetectedObjectFusionNode
 
   std::map generateDetectedObjectRoIs(
     const DetectedObjects & input_object_msg, const double image_width, const double image_height,
-    const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection);
+    const Eigen::Affine3d & object2camera_affine,
+    const image_geometry::PinholeCameraModel & pinhole_camera_model);
 
   void fuseObjectsOnImage(
     const DetectedObjects & input_object_msg,
diff --git a/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp b/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
index 4cbc0990352e6..4d4fd8d2dac42 100644
--- a/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
+++ b/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
@@ -17,6 +17,8 @@
 #include "image_projection_based_fusion/fusion_node.hpp"
 
+#include <image_geometry/pinhole_camera_model.h>
+
 #include
 #include
 
 namespace image_projection_based_fusion
diff --git a/perception/image_projection_based_fusion/include/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp b/perception/image_projection_based_fusion/include/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp
index 4168c483469ab..446417f060b9b 100644
--- a/perception/image_projection_based_fusion/include/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp
+++ b/perception/image_projection_based_fusion/include/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp
@@ -17,6 +17,8 @@
 #include "image_projection_based_fusion/fusion_node.hpp"
 
+#include <image_geometry/pinhole_camera_model.h>
+
 #include
 #include
 
diff --git a/perception/image_projection_based_fusion/include/image_projection_based_fusion/utils/utils.hpp b/perception/image_projection_based_fusion/include/image_projection_based_fusion/utils/utils.hpp
index 1851d3faaee6e..04c010523b02a 100644
--- a/perception/image_projection_based_fusion/include/image_projection_based_fusion/utils/utils.hpp
+++ b/perception/image_projection_based_fusion/include/image_projection_based_fusion/utils/utils.hpp
@@ -42,6 +42,7 @@
 #include "autoware_auto_perception_msgs/msg/shape.hpp"
 #include "tier4_perception_msgs/msg/detected_object_with_feature.hpp"
 
+#include <image_geometry/pinhole_camera_model.h>
 #include
 #include
 #include
@@ -60,6 +61,10 @@ struct PointData
   float distance;
   size_t orig_index;
 };
+
+Eigen::Vector2d calcRawImageProjectedPoint(
+  const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d);
+
 std::optional getTransformStamped(
   const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
   const std::string & source_frame_id, const rclcpp::Time & time);
diff --git a/perception/image_projection_based_fusion/package.xml b/perception/image_projection_based_fusion/package.xml
index a5b41a30be166..f6eef1d0c2ace 100644
--- a/perception/image_projection_based_fusion/package.xml
+++ b/perception/image_projection_based_fusion/package.xml
@@ -20,6 +20,7 @@
   autoware_point_types
   cv_bridge
   euclidean_cluster
+  image_geometry
   image_transport
   lidar_centerpoint
   message_filters
diff --git a/perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp b/perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp
index 2c33df0b65afc..99fe667c354d5 100644
--- a/perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp
+++ b/perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp
@@ -307,6 +307,9 @@ void PointPaintingFusionNode::fuseOnSingleImage(
   const auto class_offset = painted_pointcloud_msg.fields.at(4).offset;
   const auto p_step = painted_pointcloud_msg.point_step;
   // projection matrix
+  image_geometry::PinholeCameraModel pinhole_camera_model;
+  pinhole_camera_model.fromCameraInfo(camera_info);
+
   Eigen::Matrix3f camera_projection;  // use only x,y,z
   camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
     camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6);
@@ -342,15 +345,15 @@ dc | dc dc dc dc ||zc|
       continue;
     }
     // project
-    Eigen::Vector3f normalized_projected_point = camera_projection * Eigen::Vector3f(p_x, p_y, p_z);
+    Eigen::Vector2d projected_point =
+      calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(p_x, p_y, p_z));
+
     // iterate 2d bbox
     for (const auto & feature_object : objects) {
       sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi;
       // paint current point if it is inside bbox
       int label2d = feature_object.object.classification.front().label;
-      if (
-        !isUnknown(label2d) &&
-        isInsideBbox(normalized_projected_point.x(), normalized_projected_point.y(), roi, p_z)) {
+      if (!isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, p_z)) {
         data = &painted_pointcloud_msg.data[0];
         auto p_class = reinterpret_cast(&output[stride + class_offset]);
         for (const auto & cls : isClassTable_) {
@@ -361,7 +364,7 @@ dc | dc dc dc dc ||zc|
 #if 0
   // Parallelizing loop don't support push_back
   if (debugger_) {
-    debug_image_points.push_back(normalized_projected_point);
+    debug_image_points.push_back(projected_point);
   }
 #endif
 }
diff --git a/perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp b/perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
index feaf1ad495ce8..bfe313a838fd6 100644
--- a/perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
+++ b/perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
@@ -83,10 +83,8 @@ void RoiClusterFusionNode::fuseOnSingleImage(
   const DetectedObjectsWithFeature & input_roi_msg,
   const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjectsWithFeature & output_cluster_msg)
 {
-  Eigen::Matrix4d projection;
-  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
-    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
-    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
+  image_geometry::PinholeCameraModel pinhole_camera_model;
+  pinhole_camera_model.fromCameraInfo(camera_info);
 
   // get transform from cluster frame id to camera optical frame id
   geometry_msgs::msg::TransformStamped transform_stamped;
@@ -133,23 +131,19 @@ void RoiClusterFusionNode::fuseOnSingleImage(
         continue;
       }
-      Eigen::Vector4d projected_point =
-        projection * Eigen::Vector4d(*iter_x, *iter_y, *iter_z, 1.0);
-      Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
-        projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
+      Eigen::Vector2d projected_point =
+        calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z));
       if (
-        0 <= static_cast(normalized_projected_point.x()) &&
-        static_cast(normalized_projected_point.x()) <=
-          static_cast(camera_info.width) - 1 &&
-        0 <= static_cast(normalized_projected_point.y()) &&
-        static_cast(normalized_projected_point.y()) <=
-          static_cast(camera_info.height) - 1) {
-        min_x = std::min(static_cast(normalized_projected_point.x()), min_x);
-        min_y = std::min(static_cast(normalized_projected_point.y()), min_y);
-        max_x = std::max(static_cast(normalized_projected_point.x()), max_x);
-        max_y = std::max(static_cast(normalized_projected_point.y()), max_y);
-        projected_points.push_back(normalized_projected_point);
-        if (debugger_) debugger_->obstacle_points_.push_back(normalized_projected_point);
+        0 <= static_cast(projected_point.x()) &&
+        static_cast(projected_point.x()) <= static_cast(camera_info.width) - 1 &&
+        0 <= static_cast(projected_point.y()) &&
+        static_cast(projected_point.y()) <= static_cast(camera_info.height) - 1) {
+        min_x = std::min(static_cast(projected_point.x()), min_x);
+        min_y = std::min(static_cast(projected_point.y()), min_y);
+        max_x = std::max(static_cast(projected_point.x()), max_x);
+        max_y = std::max(static_cast(projected_point.y()), max_y);
+        projected_points.push_back(projected_point);
+        if (debugger_) debugger_->obstacle_points_.push_back(projected_point);
       }
     }
     if (projected_points.empty()) {
diff --git a/perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp b/perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
index 762d3e0ee51b1..7d85ecb2f9200 100644
--- a/perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
+++ b/perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
@@ -86,15 +86,12 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
     object2camera_affine = transformToEigen(transform_stamped_optional.value().transform);
   }
 
-  Eigen::Matrix4d camera_projection;
-  camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
-    camera_info.p.at(3), camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6),
-    camera_info.p.at(7), camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10),
-    camera_info.p.at(11);
+  image_geometry::PinholeCameraModel pinhole_camera_model;
+  pinhole_camera_model.fromCameraInfo(camera_info);
 
   const auto object_roi_map = generateDetectedObjectRoIs(
     input_object_msg, static_cast<double>(camera_info.width),
-    static_cast<double>(camera_info.height), object2camera_affine, camera_projection);
+    static_cast<double>(camera_info.height), object2camera_affine, pinhole_camera_model);
   fuseObjectsOnImage(input_object_msg, input_roi_msg.feature_objects, object_roi_map);
 
   if (debugger_) {
@@ -109,7 +106,8 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
 std::map
 RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
   const DetectedObjects & input_object_msg, const double image_width, const double image_height,
-  const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection)
+  const Eigen::Affine3d & object2camera_affine,
+  const image_geometry::PinholeCameraModel & pinhole_camera_model)
 {
   std::map object_roi_map;
   int64_t timestamp_nsec =
@@ -148,13 +146,8 @@ RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
       continue;
     }
 
-    Eigen::Vector2d proj_point;
-    {
-      Eigen::Vector4d proj_point_hom =
-        camera_projection * Eigen::Vector4d(point.x(), point.y(), point.z(), 1.0);
-      proj_point = Eigen::Vector2d(
-        proj_point_hom.x() / (proj_point_hom.z()), proj_point_hom.y() / (proj_point_hom.z()));
-    }
+    Eigen::Vector2d proj_point = calcRawImageProjectedPoint(
+      pinhole_camera_model, cv::Point3d(point.x(), point.y(), point.z()));
 
     min_x = std::min(proj_point.x(), min_x);
     min_y = std::min(proj_point.y(), min_y);
diff --git a/perception/image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp b/perception/image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
index 3f78ee159330f..b7f792bfe6e75 100644
--- a/perception/image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
+++ b/perception/image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
@@ -101,10 +101,9 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
   }
 
   // transform pointcloud to camera optical frame id
-  Eigen::Matrix4d projection;
-  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
-    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
-    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
+  image_geometry::PinholeCameraModel pinhole_camera_model;
+  pinhole_camera_model.fromCameraInfo(camera_info);
+
   geometry_msgs::msg::TransformStamped transform_stamped;
   {
     const auto transform_stamped_optional = getTransformStamped(
@@ -143,10 +142,8 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
     if (transformed_z <= 0.0) {
       continue;
     }
-    Eigen::Vector4d projected_point =
-      projection * Eigen::Vector4d(transformed_x, transformed_y, transformed_z, 1.0);
-    Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
-      projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
+    Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
+      pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z));
     for (std::size_t i = 0; i < output_objs.size(); ++i) {
       auto & feature_obj = output_objs.at(i);
       const auto & check_roi = feature_obj.feature.roi;
@@ -156,10 +153,9 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
        continue;
      }
      if (
-       check_roi.x_offset <= normalized_projected_point.x() &&
-       check_roi.y_offset <= normalized_projected_point.y() &&
-       check_roi.x_offset + check_roi.width >= normalized_projected_point.x() &&
-       check_roi.y_offset + check_roi.height >= normalized_projected_point.y()) {
+       check_roi.x_offset <= projected_point.x() && check_roi.y_offset <= projected_point.y() &&
+       check_roi.x_offset + check_roi.width >= projected_point.x() &&
+       check_roi.y_offset + check_roi.height >= projected_point.y()) {
        std::memcpy(
          &cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset], point_step);
        clusters_data_size.at(i) += point_step;
diff --git a/perception/image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp b/perception/image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
index 1c9c865b8d21e..2693a58366b9d 100644
--- a/perception/image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
+++ b/perception/image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
@@ -65,11 +65,9 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
   if (mask.cols == 0 || mask.rows == 0) {
     return;
   }
-  Eigen::Matrix4d projection;
-  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
-    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
-    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11), 0.0, 0.0,
-    0.0, 1.0;
+  image_geometry::PinholeCameraModel pinhole_camera_model;
+  pinhole_camera_model.fromCameraInfo(camera_info);
+
   geometry_msgs::msg::TransformStamped transform_stamped;
   // transform pointcloud from frame id to camera optical frame id
   {
@@ -99,13 +97,11 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
       continue;
     }
 
-    Eigen::Vector4d projected_point = projection * Eigen::Vector4d(*iter_x, *iter_y, *iter_z, 1.0);
-    Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
-      projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
+    Eigen::Vector2d projected_point =
+      calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z));
 
-    bool is_inside_image =
-      normalized_projected_point.x() > 0 && normalized_projected_point.x() < camera_info.width &&
-      normalized_projected_point.y() > 0 && normalized_projected_point.y() < camera_info.height;
+    bool is_inside_image = projected_point.x() > 0 && projected_point.x() < camera_info.width &&
+                           projected_point.y() > 0 && projected_point.y() < camera_info.height;
     if (!is_inside_image) {
       output_cloud.push_back(pcl::PointXYZ(*iter_orig_x, *iter_orig_y, *iter_orig_z));
       continue;
@@ -113,8 +109,7 @@
     // skip filtering pointcloud where semantic id out of the defined list
     uint8_t semantic_id = mask.at(
-      static_cast(normalized_projected_point.y()),
-      static_cast(normalized_projected_point.x()));
+      static_cast(projected_point.y()), static_cast(projected_point.x()));
     if (static_cast(semantic_id) >= filter_semantic_label_target_.size()) {
       output_cloud.push_back(pcl::PointXYZ(*iter_orig_x, *iter_orig_y, *iter_orig_z));
       continue;
diff --git a/perception/image_projection_based_fusion/src/utils/utils.cpp b/perception/image_projection_based_fusion/src/utils/utils.cpp
index fd25696243951..2eb4d4ef42d77 100644
--- a/perception/image_projection_based_fusion/src/utils/utils.cpp
+++ b/perception/image_projection_based_fusion/src/utils/utils.cpp
@@ -13,8 +13,30 @@
 // limitations under the License.
 
 #include "image_projection_based_fusion/utils/utils.hpp"
+
 namespace image_projection_based_fusion
 {
+Eigen::Vector2d calcRawImageProjectedPoint(
+  const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d)
+{
+  cv::Point2d rectified_image_point = pinhole_camera_model.project3dToPixel(point3d);
+
+  cv::Point2d raw_image_point = rectified_image_point;
+
+  try {
+    const bool need_unrectify = std::any_of(
+      pinhole_camera_model.cameraInfo().d.begin(), pinhole_camera_model.cameraInfo().d.end(),
+      [](double distortion) { return distortion != 0.0; });
+
+    if (need_unrectify) {
+      raw_image_point = pinhole_camera_model.unrectifyPoint(rectified_image_point);
+    }
+  } catch (cv::Exception & e) {
+    RCLCPP_WARN_STREAM(rclcpp::get_logger("image_projection_based_fusion"), e.what());
+  }
+
+  return Eigen::Vector2d(raw_image_point.x, raw_image_point.y);
+}
 std::optional getTransformStamped(
   const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
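Note on the new helper (not part of the diff above): `calcRawImageProjectedPoint()` replaces the hand-written `P * [x y z 1]^T` projection with `image_geometry`. It projects the 3D point into the rectified image via `PinholeCameraModel::project3dToPixel()` and, whenever any distortion coefficient in `d` is non-zero, maps that pixel back onto the raw (distorted) image via `unrectifyPoint()`, so the projected point can be compared against RoIs and masks computed on the raw camera image. The following is a minimal, self-contained sketch of those two calls; the intrinsics and the `main()` harness are hypothetical illustration values, not taken from this PR.

```cpp
// Sketch only: demonstrates project3dToPixel() + unrectifyPoint(), the two
// image_geometry calls used by calcRawImageProjectedPoint(). All camera
// parameters below are made up for the example.
#include <image_geometry/pinhole_camera_model.h>

#include <Eigen/Core>
#include <opencv2/core.hpp>

#include <sensor_msgs/msg/camera_info.hpp>

#include <cstdio>

int main()
{
  sensor_msgs::msg::CameraInfo camera_info;
  camera_info.width = 1280;
  camera_info.height = 720;
  camera_info.distortion_model = "plumb_bob";
  camera_info.d = {-0.1, 0.01, 0.0, 0.0, 0.0};  // non-zero -> unrectifyPoint() is applied
  camera_info.k = {1000.0, 0.0, 640.0, 0.0, 1000.0, 360.0, 0.0, 0.0, 1.0};
  camera_info.r = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
  camera_info.p = {1000.0, 0.0, 640.0, 0.0, 0.0, 1000.0, 360.0, 0.0, 0.0, 0.0, 1.0, 0.0};

  image_geometry::PinholeCameraModel pinhole_camera_model;
  pinhole_camera_model.fromCameraInfo(camera_info);

  // A point in the camera optical frame (z forward), in meters.
  const cv::Point3d point3d(1.0, 0.5, 10.0);

  // Equivalent of the removed code path: P * [x y z 1]^T, divide by z
  // -> pixel coordinates in the rectified image.
  const cv::Point2d rectified_point = pinhole_camera_model.project3dToPixel(point3d);

  // New code path: additionally map the rectified pixel back into the raw
  // (distorted) image before comparing against RoIs or a segmentation mask.
  const cv::Point2d raw_point = pinhole_camera_model.unrectifyPoint(rectified_point);

  const Eigen::Vector2d projected_point(raw_point.x, raw_point.y);
  std::printf(
    "rectified: (%.2f, %.2f)  raw: (%.2f, %.2f)\n", rectified_point.x, rectified_point.y,
    projected_point.x(), projected_point.y());
  return 0;
}
```

With an all-zero `d` the rectified and raw pixels coincide, which is why the helper skips `unrectifyPoint()` in that case.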