Commit

Merge remote-tracking branch 'origin/beta/v0.10.1' into experiment/steer_accel_limit

jpntaxi committed Sep 25, 2023
2 parents 7e4e0ef + e70187f commit 8f758c2
Showing 141 changed files with 5,489 additions and 2,266 deletions.
@@ -28,15 +28,18 @@ using geometry_msgs::msg::Pose;

visualization_msgs::msg::MarkerArray createStopVirtualWallMarker(
const Pose & pose, const std::string & module_name, const rclcpp::Time & now, const int32_t id,
const double longitudinal_offset = 0.0, const std::string & ns_prefix = "");
const double longitudinal_offset = 0.0, const std::string & ns_prefix = "",
const bool is_driving_forward = true);

visualization_msgs::msg::MarkerArray createSlowDownVirtualWallMarker(
const Pose & pose, const std::string & module_name, const rclcpp::Time & now, const int32_t id,
const double longitudinal_offset = 0.0, const std::string & ns_prefix = "");
const double longitudinal_offset = 0.0, const std::string & ns_prefix = "",
const bool is_driving_forward = true);

visualization_msgs::msg::MarkerArray createDeadLineVirtualWallMarker(
const Pose & pose, const std::string & module_name, const rclcpp::Time & now, const int32_t id,
const double longitudinal_offset = 0.0, const std::string & ns_prefix = "");
const double longitudinal_offset = 0.0, const std::string & ns_prefix = "",
const bool is_driving_forward = true);

visualization_msgs::msg::MarkerArray createDeletedStopVirtualWallMarker(
const rclcpp::Time & now, const int32_t id);
@@ -38,6 +38,7 @@ struct VirtualWall
std::string ns{};
VirtualWallType style = stop;
double longitudinal_offset{};
bool is_driving_forward{true};
};
typedef std::vector<VirtualWall> VirtualWalls;

@@ -54,7 +55,7 @@ class VirtualWallMarkerCreator
using create_wall_function = std::function<visualization_msgs::msg::MarkerArray(
const geometry_msgs::msg::Pose & pose, const std::string & module_name,
const rclcpp::Time & now, const int32_t id, const double longitudinal_offset,
const std::string & ns_prefix)>;
const std::string & ns_prefix, const bool is_driving_forward)>;

VirtualWalls virtual_walls;
std::unordered_map<std::string, MarkerCount> marker_count_per_namespace;
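
For orientation, here is a minimal sketch of how a module could fill in the new is_driving_forward field when requesting a stop wall while reversing. The VirtualWall field names and the public virtual_walls member come from this diff; the motion_utils namespace, the include path, and the create_markers(now) signature are assumptions based on the accompanying marker_helper.cpp.

  #include <motion_utils/marker/virtual_wall_marker_creator.hpp>  // assumed header path

  #include <geometry_msgs/msg/pose.hpp>
  #include <rclcpp/time.hpp>

  #include <visualization_msgs/msg/marker_array.hpp>

  visualization_msgs::msg::MarkerArray make_reverse_stop_wall(
    motion_utils::VirtualWallMarkerCreator & creator, const geometry_msgs::msg::Pose & stop_pose,
    const rclcpp::Time & now)
  {
    motion_utils::VirtualWall wall;
    wall.pose = stop_pose;
    wall.text = "example_module";     // label drawn next to the wall
    wall.ns = "example_";             // namespace prefix for the markers
    wall.style = motion_utils::stop;  // red stop wall, per the struct's default enumerator
    wall.longitudinal_offset = 1.0;   // e.g. front overhang [m]
    wall.is_driving_forward = false;  // new field: the offset is applied backwards
    creator.virtual_walls.push_back(wall);
    return creator.create_markers(now);  // assumed signature
  }
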
21 changes: 12 additions & 9 deletions common/motion_utils/src/marker/marker_helper.cpp
@@ -86,32 +86,35 @@ namespace motion_utils
{
visualization_msgs::msg::MarkerArray createStopVirtualWallMarker(
const geometry_msgs::msg::Pose & pose, const std::string & module_name, const rclcpp::Time & now,
const int32_t id, const double longitudinal_offset, const std::string & ns_prefix)
const int32_t id, const double longitudinal_offset, const std::string & ns_prefix,
const bool is_driving_forward)
{
const auto pose_with_offset =
tier4_autoware_utils::calcOffsetPose(pose, longitudinal_offset, 0.0, 0.0);
const auto pose_with_offset = tier4_autoware_utils::calcOffsetPose(
pose, longitudinal_offset * (is_driving_forward ? 1.0 : -1.0), 0.0, 0.0);
return createVirtualWallMarkerArray(
pose_with_offset, module_name, ns_prefix + "stop_", now, id,
createMarkerColor(1.0, 0.0, 0.0, 0.5));
}

visualization_msgs::msg::MarkerArray createSlowDownVirtualWallMarker(
const geometry_msgs::msg::Pose & pose, const std::string & module_name, const rclcpp::Time & now,
const int32_t id, const double longitudinal_offset, const std::string & ns_prefix)
const int32_t id, const double longitudinal_offset, const std::string & ns_prefix,
const bool is_driving_forward)
{
const auto pose_with_offset =
tier4_autoware_utils::calcOffsetPose(pose, longitudinal_offset, 0.0, 0.0);
const auto pose_with_offset = tier4_autoware_utils::calcOffsetPose(
pose, longitudinal_offset * (is_driving_forward ? 1.0 : -1.0), 0.0, 0.0);
return createVirtualWallMarkerArray(
pose_with_offset, module_name, ns_prefix + "slow_down_", now, id,
createMarkerColor(1.0, 1.0, 0.0, 0.5));
}

visualization_msgs::msg::MarkerArray createDeadLineVirtualWallMarker(
const geometry_msgs::msg::Pose & pose, const std::string & module_name, const rclcpp::Time & now,
const int32_t id, const double longitudinal_offset, const std::string & ns_prefix)
const int32_t id, const double longitudinal_offset, const std::string & ns_prefix,
const bool is_driving_forward)
{
const auto pose_with_offset =
tier4_autoware_utils::calcOffsetPose(pose, longitudinal_offset, 0.0, 0.0);
const auto pose_with_offset = tier4_autoware_utils::calcOffsetPose(
pose, longitudinal_offset * (is_driving_forward ? 1.0 : -1.0), 0.0, 0.0);
return createVirtualWallMarkerArray(
pose_with_offset, module_name, ns_prefix + "dead_line_", now, id,
createMarkerColor(0.0, 1.0, 0.0, 0.5));
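
As a usage sketch (not part of the diff): with is_driving_forward = false the longitudinal offset is now negated, so the wall is drawn behind the given pose rather than in front of it. The function signature matches this diff; the include path is an assumption.

  #include <motion_utils/marker/marker_helper.hpp>  // assumed header path

  visualization_msgs::msg::MarkerArray example_reverse_stop_wall(
    const geometry_msgs::msg::Pose & stop_pose, const rclcpp::Time & now)
  {
    // With is_driving_forward = false the 1.0 m offset is applied as -1.0 m.
    return motion_utils::createStopVirtualWallMarker(
      stop_pose, "example_module", now, /*id=*/0, /*longitudinal_offset=*/1.0,
      /*ns_prefix=*/"", /*is_driving_forward=*/false);
  }
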
@@ -64,7 +64,7 @@ visualization_msgs::msg::MarkerArray VirtualWallMarkerCreator::create_markers(
}
auto markers = create_fn(
virtual_wall.pose, virtual_wall.text, now, 0, virtual_wall.longitudinal_offset,
virtual_wall.ns);
virtual_wall.ns, virtual_wall.is_driving_forward);
for (auto & marker : markers.markers) {
marker.id = marker_count_per_namespace[marker.ns].current++;
marker_array.markers.push_back(marker);
@@ -15,15 +15,22 @@
#ifndef OBJECT_RECOGNITION_UTILS__TRANSFORM_HPP_
#define OBJECT_RECOGNITION_UTILS__TRANSFORM_HPP_

#include <pcl_ros/transforms.hpp>

#include <geometry_msgs/msg/transform.hpp>
#include <sensor_msgs/msg/point_cloud2.hpp>
#include <sensor_msgs/msg/point_field.hpp>

#include <boost/optional.hpp>

#include <tf2_ros/buffer.h>
#include <tf2_ros/transform_listener.h>
#ifdef ROS_DISTRO_GALACTIC
#include <tf2_eigen/tf2_eigen.h>
#include <tf2_geometry_msgs/tf2_geometry_msgs.h>
#else
#include <tf2_eigen/tf2_eigen.hpp>

#include <tf2_geometry_msgs/tf2_geometry_msgs.hpp>
#endif

@@ -45,6 +52,23 @@ namespace detail
return boost::none;
}
}

[[maybe_unused]] inline boost::optional<Eigen::Matrix4f> getTransformMatrix(
const std::string & in_target_frame, const std_msgs::msg::Header & in_cloud_header,
const tf2_ros::Buffer & tf_buffer)
{
try {
geometry_msgs::msg::TransformStamped transform_stamped;
transform_stamped = tf_buffer.lookupTransform(
in_target_frame, in_cloud_header.frame_id, in_cloud_header.stamp,
rclcpp::Duration::from_seconds(1.0));
Eigen::Matrix4f mat = tf2::transformToEigen(transform_stamped.transform).matrix().cast<float>();
return mat;
} catch (tf2::TransformException & e) {
RCLCPP_WARN_STREAM(rclcpp::get_logger("detail::getTransformMatrix"), e.what());
return boost::none;
}
}
} // namespace detail

namespace object_recognition_utils
@@ -79,6 +103,46 @@ bool transformObjects(
}
return true;
}
template <class T>
bool transformObjectsWithFeature(
const T & input_msg, const std::string & target_frame_id, const tf2_ros::Buffer & tf_buffer,
T & output_msg)
{
output_msg = input_msg;
if (input_msg.header.frame_id != target_frame_id) {
output_msg.header.frame_id = target_frame_id;
tf2::Transform tf_target2objects_world;
tf2::Transform tf_target2objects;
tf2::Transform tf_objects_world2objects;
const auto ros_target2objects_world = detail::getTransform(
tf_buffer, input_msg.header.frame_id, target_frame_id, input_msg.header.stamp);
if (!ros_target2objects_world) {
return false;
}
const auto tf_matrix = detail::getTransformMatrix(target_frame_id, input_msg.header, tf_buffer);
if (!tf_matrix) {
RCLCPP_WARN(
rclcpp::get_logger("object_recognition_utils"), "failed to get the transform matrix");
return false;
}
for (auto & feature_object : output_msg.feature_objects) {
// transform object
tf2::fromMsg(
feature_object.object.kinematics.pose_with_covariance.pose, tf_objects_world2objects);
tf_target2objects = tf_target2objects_world * tf_objects_world2objects;
tf2::toMsg(tf_target2objects, feature_object.object.kinematics.pose_with_covariance.pose);

// transform cluster
sensor_msgs::msg::PointCloud2 transformed_cluster;
pcl_ros::transformPointCloud(*tf_matrix, feature_object.feature.cluster, transformed_cluster);
transformed_cluster.header.frame_id = target_frame_id;
feature_object.feature.cluster = transformed_cluster;
}
output_msg.header.frame_id = target_frame_id;
return true;
}
return true;
}
} // namespace object_recognition_utils

#endif // OBJECT_RECOGNITION_UTILS__TRANSFORM_HPP_
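
A short usage sketch for the new helper. Any type whose feature_objects carry a pose and a point cloud cluster fits the template; the tier4_perception_msgs::msg::DetectedObjectsWithFeature message and its include path are assumptions, and a tf2_ros::Buffer is assumed to be filled by a transform listener elsewhere.

  #include <object_recognition_utils/transform.hpp>

  #include <tier4_perception_msgs/msg/detected_objects_with_feature.hpp>  // assumed message type

  bool clusters_to_map_frame(
    const tier4_perception_msgs::msg::DetectedObjectsWithFeature & input,
    const tf2_ros::Buffer & tf_buffer,
    tier4_perception_msgs::msg::DetectedObjectsWithFeature & output)
  {
    // Object poses are transformed via TF, clusters via the Eigen matrix helper above.
    return object_recognition_utils::transformObjectsWithFeature(input, "map", tf_buffer, output);
  }
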
6 changes: 6 additions & 0 deletions common/object_recognition_utils/package.xml
@@ -17,7 +17,13 @@
<depend>geometry_msgs</depend>
<depend>interpolation</depend>
<depend>libboost-dev</depend>
<depend>pcl_conversions</depend>
<depend>pcl_ros</depend>
<depend>rclcpp</depend>
<depend>sensor_msgs</depend>
<depend>std_msgs</depend>
<depend>tf2</depend>
<depend>tf2_eigen</depend>
<depend>tier4_autoware_utils</depend>

<test_depend>ament_cmake_ros</test_depend>
18 changes: 15 additions & 3 deletions common/rtc_manager_rviz_plugin/src/rtc_manager_panel.cpp
@@ -366,16 +366,28 @@ void RTCManagerPanel::onRTCStatus(const CooperateStatusArray::ConstSharedPtr msg
rtc_table_->clearContents();
num_rtc_status_ptr_->setText(
QString::fromStdString("The Number of RTC Statuses: " + std::to_string(msg->statuses.size())));
if (msg->statuses.empty()) return;
if (msg->statuses.empty()) {
rtc_table_->update();
return;
}
// keep the RTC display stable and avoid letting it occupy too much space
size_t min_display_size{5};
size_t max_display_size{10};
// rtc messages are already sorted by distance
rtc_table_->setRowCount(
std::max(min_display_size, std::min(msg->statuses.size(), max_display_size)));
int cnt = 0;
for (auto status : msg->statuses) {
if (static_cast<size_t>(cnt) >= max_display_size) return;

auto sorted_statuses = msg->statuses;
std::partition(sorted_statuses.begin(), sorted_statuses.end(), [](const auto & status) {
return !status.auto_mode && !uint2bool(status.command_status.type);
});

for (auto status : sorted_statuses) {
if (static_cast<size_t>(cnt) >= max_display_size) {
rtc_table_->update();
return;
}
// uuid
{
std::stringstream uuid;
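
The std::partition call above front-loads the statuses that still need operator attention (not in auto mode and not yet activated) so they stay within the capped table size. A minimal, self-contained sketch of that ordering idea follows; note that plain std::partition does not keep the original distance-sorted order inside each group, whereas std::stable_partition would.

  #include <algorithm>
  #include <iostream>
  #include <vector>

  struct ExampleStatus
  {
    int id;
    bool needs_attention;
  };

  int main()
  {
    std::vector<ExampleStatus> statuses{{0, false}, {1, true}, {2, false}, {3, true}};
    // Move the entries that need attention to the front of the vector.
    std::partition(statuses.begin(), statuses.end(), [](const ExampleStatus & s) {
      return s.needs_attention;
    });
    for (const auto & s : statuses) {
      std::cout << s.id << (s.needs_attention ? " needs attention" : " handled") << "\n";
    }
    return 0;
  }
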
14 changes: 12 additions & 2 deletions control/operation_mode_transition_manager/src/state.cpp
@@ -121,14 +121,24 @@ bool AutonomousMode::isModeChangeCompleted()
const auto closest_point = trajectory_.points.at(*closest_idx);

// check for lateral deviation
const auto dist_deviation = calcDistance2d(closest_point.pose, kinematics_.pose.pose);
const auto dist_deviation =
motion_utils::calcLateralOffset(trajectory_.points, kinematics_.pose.pose.position);
if (std::isnan(dist_deviation)) {
RCLCPP_INFO(logger_, "Not stable yet: lateral offset calculation failed.");
return unstable();
}
if (dist_deviation > stable_check_param_.dist_threshold) {
RCLCPP_INFO(logger_, "Not stable yet: distance deviation is too large: %f", dist_deviation);
return unstable();
}

// check for yaw deviation
const auto yaw_deviation = calcYawDeviation(closest_point.pose, kinematics_.pose.pose);
const auto yaw_deviation =
motion_utils::calcYawDeviation(trajectory_.points, kinematics_.pose.pose);
if (std::isnan(yaw_deviation)) {
RCLCPP_INFO(logger_, "Not stable yet: lateral offset calculation failed.");
return unstable();
}
if (yaw_deviation > stable_check_param_.yaw_threshold) {
RCLCPP_INFO(logger_, "Not stable yet: yaw deviation is too large: %f", yaw_deviation);
return unstable();
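
The pattern introduced here, as a self-contained sketch: deviations are measured against the whole trajectory with the motion_utils helpers rather than a single closest point, and a NaN result (e.g. a degenerate trajectory) is treated as not yet stable. The include paths and message type below are the usual Autoware ones and are assumptions, as is taking absolute values of the signed deviations.

  #include <cmath>

  #include <motion_utils/trajectory/trajectory.hpp>  // assumed header path

  #include <autoware_auto_planning_msgs/msg/trajectory.hpp>
  #include <geometry_msgs/msg/pose.hpp>

  bool is_pose_stable(
    const autoware_auto_planning_msgs::msg::Trajectory & trajectory,
    const geometry_msgs::msg::Pose & pose, const double dist_threshold, const double yaw_threshold)
  {
    const auto lateral_offset = motion_utils::calcLateralOffset(trajectory.points, pose.position);
    const auto yaw_deviation = motion_utils::calcYawDeviation(trajectory.points, pose);
    if (std::isnan(lateral_offset) || std::isnan(yaw_deviation)) {
      return false;  // calculation failed, keep reporting "not stable yet"
    }
    return std::abs(lateral_offset) < dist_threshold && std::abs(yaw_deviation) < yaw_threshold;
  }
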
@@ -1,8 +1,24 @@
<?xml version="1.0"?>
<launch>
<!-- Interface param -->
<arg name="output/objects" default="objects"/>

<!-- LiDAR param -->
<arg name="input/pointcloud"/>
<arg name="input/obstacle_segmentation/pointcloud" default="/perception/obstacle_segmentation/pointcloud"/>
<arg name="output/objects" default="objects"/>
<arg name="container_name" default="pointcloud_container"/>

<arg name="lidar_detection_model" default="centerpoint" description="options: `centerpoint`, `apollo`, `pointpainting`, `clustering`"/>
<arg name="score_threshold" default="0.35"/>

<arg name="use_object_filter" default="true" description="use object filter"/>
<arg name="use_pointcloud_container" default="false" description="use pointcloud container for detection preprocessor"/>
<arg name="use_validator" default="true" description="use obstacle_pointcloud based validator"/>
<arg name="remove_unknown" default="true"/>
<arg name="trust_distance" default="30.0"/>
<arg name="use_roi_based_cluster" default="false"/>

<!-- Camera param -->
<arg name="image_raw0" default="/image_raw" description="image raw topic name"/>
<arg name="camera_info0" default="/camera_info" description="camera info topic name"/>
<arg name="detection_rois0" default="/perception/object_recognition/detection/rois0" description="detection rois output topic name"/>
@@ -28,16 +44,6 @@
<arg name="camera_info7" default="/camera_info7"/>
<arg name="detection_rois7" default="/perception/object_recognition/detection/rois7"/>
<arg name="image_number" default="1" description="choose image raw number(1-8)"/>
<arg name="lidar_detection_model" default="centerpoint" description="options: `centerpoint`, `apollo`, `pointpainting`, `clustering`"/>
<arg name="use_pointcloud_map" default="true" description="use pointcloud map in detection"/>
<arg name="use_downsample_pointcloud" default="true" description="use downsample pointcloud in perception"/>
<arg name="use_object_filter" default="true" description="use object filter"/>
<arg name="use_pointcloud_container" default="false" description="use pointcloud container for detection preprocessor"/>
<arg name="container_name" default="pointcloud_container"/>
<arg name="use_validator" default="true" description="use obstacle_pointcloud based validator"/>
<arg name="score_threshold" default="0.35"/>
<arg name="remove_unknown" default="true"/>
<arg name="trust_distance" default="30.0"/>

<!-- Jetson AGX -->
<!-- <include file="$(find-pkg-share tensorrt_yolo)/launch/yolo.launch.xml">
@@ -52,29 +58,71 @@
<arg name="image_number" value="$(var image_number)"/>
</include> -->

<!-- Pointcloud map filter -->
<!-- Pointcloud filter -->
<group>
<include file="$(find-pkg-share tier4_perception_launch)/launch/object_recognition/detection/pointcloud_map_filter.launch.py" if="$(var use_pointcloud_map)">
<include file="$(find-pkg-share tier4_perception_launch)/launch/object_recognition/detection/pointcloud_map_filter.launch.py">
<arg name="input_topic" value="$(var input/obstacle_segmentation/pointcloud)"/>
<arg name="output_topic" value="pointcloud_map_filtered/pointcloud"/>
<arg name="use_intra_process" value="true"/>
<arg name="use_multithread" value="true"/>
<arg name="use_pointcloud_container" value="$(var use_pointcloud_container)"/>
<arg name="container_name" value="$(var container_name)"/>
<arg name="use_pointcloud_map" value="$(var use_pointcloud_map)"/>
</include>
</group>

<!-- Clustering -->
<group>
<push-ros-namespace namespace="clustering"/>
<group>
<let name="clustering/input/pointcloud" value="/perception/object_recognition/detection/pointcloud_map_filtered/pointcloud" if="$(var use_pointcloud_map)"/>
<let name="clustering/input/pointcloud" value="$(var input/obstacle_segmentation/pointcloud)" unless="$(var use_pointcloud_map)"/>
<let name="euclidean_cluster_output" value="euclidean_cluster/clusters" if="$(var use_roi_based_cluster)"/>
<let name="euclidean_cluster_output" value="clusters" unless="$(var use_roi_based_cluster)"/>
<include file="$(find-pkg-share euclidean_cluster)/launch/voxel_grid_based_euclidean_cluster.launch.xml">
<arg name="input_pointcloud" value="$(var clustering/input/pointcloud)"/>
<arg name="output_clusters" value="clusters"/>
<arg name="use_pointcloud_map" value="false"/>
<arg name="use_downsample_pointcloud" value="$(var use_downsample_pointcloud)"/>
<arg name="input_pointcloud" value="/perception/object_recognition/detection/pointcloud_map_filtered/pointcloud"/>
<arg name="output_clusters" value="$(var euclidean_cluster_output)"/>
<arg name="use_low_height_cropbox" value="$(var use_low_height_cropbox)"/>
</include>
</group>

<!-- roi based clustering -->
<group>
<include file="$(find-pkg-share image_projection_based_fusion)/launch/roi_pointcloud_fusion.launch.xml" if="$(var use_roi_based_cluster)">
<arg name="input/camera_info0" value="$(var camera_info0)"/>
<arg name="input/rois0" value="$(var detection_rois0)"/>
<arg name="input/camera_info1" value="$(var camera_info1)"/>
<arg name="input/rois1" value="$(var detection_rois1)"/>
<arg name="input/camera_info2" value="$(var camera_info2)"/>
<arg name="input/rois2" value="$(var detection_rois2)"/>
<arg name="input/camera_info3" value="$(var camera_info3)"/>
<arg name="input/rois3" value="$(var detection_rois3)"/>
<arg name="input/camera_info4" value="$(var camera_info4)"/>
<arg name="input/rois4" value="$(var detection_rois4)"/>
<arg name="input/camera_info5" value="$(var camera_info5)"/>
<arg name="input/rois5" value="$(var detection_rois5)"/>
<arg name="input/camera_info6" value="$(var camera_info6)"/>
<arg name="input/rois6" value="$(var detection_rois6)"/>
<arg name="input/camera_info7" value="$(var camera_info7)"/>
<arg name="input/rois7" value="$(var detection_rois7)"/>
<arg name="input/rois_number" value="$(var image_number)"/>
<arg name="input/image0" value="$(var image_raw0)"/>
<arg name="input/image1" value="$(var image_raw1)"/>
<arg name="input/image2" value="$(var image_raw2)"/>
<arg name="input/image3" value="$(var image_raw3)"/>
<arg name="input/image4" value="$(var image_raw4)"/>
<arg name="input/image5" value="$(var image_raw5)"/>
<arg name="input/image6" value="$(var image_raw6)"/>
<arg name="input/image7" value="$(var image_raw7)"/>
<arg name="input/pointcloud" value="/perception/object_recognition/detection/pointcloud_map_filtered/pointcloud"/>
<arg name="output_clusters" value="roi_cluster/clusters"/>
</include>
</group>

<!-- simple_cluster_merger -->
<group>
<include file="$(find-pkg-share cluster_merger)/launch/cluster_merger.launch.xml" if="$(var use_roi_based_cluster)">
<arg name="input/cluster0" value="euclidean_cluster/clusters"/>
<arg name="input/cluster1" value="roi_cluster/clusters"/>
<arg name="output/clusters" value="clusters"/>
</include>
</group>

@@ -89,6 +137,8 @@
<let name="input/clustering" value="/perception/object_recognition/detection/clustering/clusters"/>
<push-ros-namespace namespace="camera_lidar_fusion"/>
<!-- Fusion camera-lidar to classify -->

<!-- euclidean clustering -->
<group>
<include file="$(find-pkg-share image_projection_based_fusion)/launch/roi_cluster_fusion.launch.xml">
<arg name="input/camera_info0" value="$(var camera_info0)"/>