diff --git a/code/perception/launch/perception.launch b/code/perception/launch/perception.launch index 67a9c2ab..cca0dc68 100644 --- a/code/perception/launch/perception.launch +++ b/code/perception/launch/perception.launch @@ -1,6 +1,6 @@ - + - + diff --git a/code/perception/src/__init__.py b/code/perception/src/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/perception/src/lidar_distance.py b/code/perception/src/lidar_distance.py index 04394ee2..5c937be0 100755 --- a/code/perception/src/lidar_distance.py +++ b/code/perception/src/lidar_distance.py @@ -1,15 +1,15 @@ #!/usr/bin/env python +from joblib import Parallel, delayed import rospy import ros_numpy import numpy as np -import lidar_filter_utility -from sensor_msgs.msg import PointCloud2 +from lidar_filter_utility import bounding_box, remove_field_name +from sensor_msgs.msg import PointCloud2, Image as ImageMsg +from sklearn.cluster import DBSCAN +from cv_bridge import CvBridge # from mpl_toolkits.mplot3d import Axes3D # from itertools import combinations -from sensor_msgs.msg import Image as ImageMsg -from cv_bridge import CvBridge - # from matplotlib.colors import LinearSegmentedColormap @@ -18,234 +18,416 @@ class LidarDistance: how to configute this node """ - def callback(self, data): - """Callback function, filters a PontCloud2 message - by restrictions defined in the launchfile. + def __init__(self): + self.cluster_buffer = [] - Publishes a Depth image for the specified camera angle. - Each angle has do be delt with differently since the signs of the - coordinate system change with the view angle. - - :param data: a PointCloud2 + def callback(self, data): """ - coordinates = ros_numpy.point_cloud2.pointcloud2_to_array(data) + Callback function that processes LiDAR point cloud data. 
- # Center - reconstruct_bit_mask_center = lidar_filter_utility.bounding_box( - coordinates, - max_x=np.inf, - min_x=0.0, - min_z=-1.6, - ) - reconstruct_coordinates_center = coordinates[reconstruct_bit_mask_center] - reconstruct_coordinates_xyz_center = np.array( - lidar_filter_utility.remove_field_name( - reconstruct_coordinates_center, "intensity" - ).tolist() - ) - dist_array_center = self.reconstruct_img_from_lidar( - reconstruct_coordinates_xyz_center, focus="Center" - ) - dist_array_center_msg = self.bridge.cv2_to_imgmsg( - dist_array_center, encoding="passthrough" - ) - dist_array_center_msg.header = data.header - self.dist_array_center_publisher.publish(dist_array_center_msg) - - # Back - reconstruct_bit_mask_back = lidar_filter_utility.bounding_box( - coordinates, - max_x=0.0, - min_x=-np.inf, - min_z=-1.6, - ) - reconstruct_coordinates_back = coordinates[reconstruct_bit_mask_back] - reconstruct_coordinates_xyz_back = np.array( - lidar_filter_utility.remove_field_name( - reconstruct_coordinates_back, "intensity" - ).tolist() - ) - dist_array_back = self.reconstruct_img_from_lidar( - reconstruct_coordinates_xyz_back, focus="Back" - ) - dist_array_back_msg = self.bridge.cv2_to_imgmsg( - dist_array_back, encoding="passthrough" - ) - dist_array_back_msg.header = data.header - self.dist_array_back_publisher.publish(dist_array_back_msg) - - # Left - reconstruct_bit_mask_left = lidar_filter_utility.bounding_box( - coordinates, - max_y=np.inf, - min_y=0.0, - min_z=-1.6, - ) - reconstruct_coordinates_left = coordinates[reconstruct_bit_mask_left] - reconstruct_coordinates_xyz_left = np.array( - lidar_filter_utility.remove_field_name( - reconstruct_coordinates_left, "intensity" - ).tolist() - ) - dist_array_left = self.reconstruct_img_from_lidar( - reconstruct_coordinates_xyz_left, focus="Left" - ) - dist_array_left_msg = self.bridge.cv2_to_imgmsg( - dist_array_left, encoding="passthrough" - ) - dist_array_left_msg.header = data.header - self.dist_array_left_publisher.publish(dist_array_left_msg) + Executes clustering and image calculations for the provided point cloud. - # Right - reconstruct_bit_mask_right = lidar_filter_utility.bounding_box( - coordinates, max_y=-0.0, min_y=-np.inf, min_z=-1.6 - ) - reconstruct_coordinates_right = coordinates[reconstruct_bit_mask_right] - reconstruct_coordinates_xyz_right = np.array( - lidar_filter_utility.remove_field_name( - reconstruct_coordinates_right, "intensity" - ).tolist() - ) - dist_array_right = self.reconstruct_img_from_lidar( - reconstruct_coordinates_xyz_right, focus="Right" - ) - dist_array_right_msg = self.bridge.cv2_to_imgmsg( - dist_array_right, encoding="passthrough" - ) - dist_array_right_msg.header = data.header - self.dist_array_right_publisher.publish(dist_array_right_msg) + :param data: LiDAR point cloud as a ROS PointCloud2 message. + """ + self.start_clustering(data) + self.start_image_calculation(data) def listener(self): """ - Initializes the node and it's publishers + Initializes the ROS node, creates publishers/subscribers, and keeps it active. """ - # run simultaneously. 
rospy.init_node("lidar_distance") - self.bridge = CvBridge() + self.bridge = CvBridge() # OpenCV bridge for image conversions + # Publisher for filtered point clouds self.pub_pointcloud = rospy.Publisher( rospy.get_param( "~point_cloud_topic", "/carla/hero/" + rospy.get_namespace() + "_filtered", ), PointCloud2, - queue_size=10, + queue_size=1, ) - # publisher for dist_array + # Publishers for distance images in various directions self.dist_array_center_publisher = rospy.Publisher( rospy.get_param("~image_distance_topic", "/paf/hero/Center/dist_array"), ImageMsg, queue_size=10, ) - - # publisher for dist_array self.dist_array_back_publisher = rospy.Publisher( rospy.get_param("~image_distance_topic", "/paf/hero/Back/dist_array"), ImageMsg, queue_size=10, ) - - # publisher for dist_array + self.dist_array_lidar_publisher = rospy.Publisher( + rospy.get_param( + "~image_distance_topic_cluster", "/paf/hero/dist_clustered" + ), + PointCloud2, + queue_size=10, + ) + rospy.loginfo("dist_array_lidar_publisher successfully created.") self.dist_array_left_publisher = rospy.Publisher( rospy.get_param("~image_distance_topic", "/paf/hero/Left/dist_array"), ImageMsg, queue_size=10, ) - - # publisher for dist_array self.dist_array_right_publisher = rospy.Publisher( rospy.get_param("~image_distance_topic", "/paf/hero/Right/dist_array"), ImageMsg, queue_size=10, ) + # Subscriber for LiDAR data (point clouds) rospy.Subscriber( rospy.get_param("~source_topic", "/carla/hero/LIDAR"), PointCloud2, self.callback, ) + rospy.loginfo("Lidar Processor Node started.") rospy.spin() - def reconstruct_img_from_lidar(self, coordinates_xyz, focus): + def start_clustering(self, data): + """ + Filters LiDAR point clouds, performs clustering, + and publishes the combined clusters. + + :param data: LiDAR point clouds in ROS PointCloud2 format. + """ + + # Filter point clouds to remove irrelevant data + coordinates = ros_numpy.point_cloud2.pointcloud2_to_array(data) + filtered_coordinates = coordinates[ + ~( + (coordinates["x"] >= -2) + & (coordinates["x"] <= 2) + & (coordinates["y"] >= -1) + & (coordinates["y"] <= 1) + ) # Exclude points related to the ego vehicle + & ( + coordinates["z"] > -1.7 + 0.05 + ) # Minimum height in z to exclude the road + ] + + # Compute cluster data from the filtered coordinates + clustered_points = cluster_lidar_data_from_pointcloud( + coordinates=filtered_coordinates + ) + + # Only store valid cluster data + if clustered_points: + self.cluster_buffer.append(clustered_points) + else: + rospy.logwarn("No cluster data generated.") + + # Combine clusters + combined_clusters = combine_clusters(self.cluster_buffer) + + self.cluster_buffer = [] + + # Publish the combined clusters + self.publish_clusters(combined_clusters, data.header) + + def publish_clusters(self, combined_clusters, data_header): + """ + Publishes combined clusters as a ROS PointCloud2 message. + + :param combined_clusters: Combined point clouds of the clusters as a structured + NumPy array. + :param data_header: Header information for the ROS message. + """ + # Convert to a PointCloud2 message + pointcloud_msg = ros_numpy.point_cloud2.array_to_pointcloud2(combined_clusters) + pointcloud_msg.header = data_header + pointcloud_msg.header.stamp = rospy.Time.now() + # Publish the clusters + self.dist_array_lidar_publisher.publish(pointcloud_msg) + + def start_image_calculation(self, data): + """ + Computes distance images based on LiDAR data and publishes them. + + :param data: LiDAR point cloud in ROS PointCloud2 format. 
+ """ + coordinates = ros_numpy.point_cloud2.pointcloud2_to_array(data) + + # Directions to process + directions = ["Center", "Back", "Left", "Right"] + + # Process images for all directions + processed_images = { + direction: self.calculate_image(coordinates, focus=direction) + for direction in directions + } + + # Publish the processed images + self.publish_images(processed_images, data.header) + + def calculate_image(self, coordinates, focus): + """ + Calculates a distance image for a specific focus (view direction) from + LiDAR coordinates. + + :param coordinates: Filtered LiDAR coordinates as a NumPy array. + :param focus: The focus direction ("Center", "Back", "Left", "Right"). + :return: Distance image as a 2D array. + """ + # Define bounding box parameters based on focus direction + bounding_box_params = { + "Center": {"max_x": np.inf, "min_x": 0.0, "min_z": -1.6}, + "Back": {"max_x": 0.0, "min_x": -np.inf, "min_z": -1.6}, + "Left": {"max_y": np.inf, "min_y": 0.0, "min_z": -1.6}, + "Right": {"max_y": -0.0, "min_y": -np.inf, "min_z": -1.6}, + } + + # Select parameters for the given focus + params = bounding_box_params.get(focus) + if not params: + rospy.logwarn(f"Unknown focus: {focus}. Skipping image calculation.") + return None + + # Apply bounding box filter + reconstruct_bit_mask = bounding_box(coordinates, **params) + reconstruct_coordinates = coordinates[reconstruct_bit_mask] + + # Remove the "intensity" field and convert to a NumPy array + reconstruct_coordinates_xyz = np.array( + remove_field_name(reconstruct_coordinates, "intensity").tolist() + ) + + # Reconstruct the image based on the focus + return self.reconstruct_img_from_lidar(reconstruct_coordinates_xyz, focus=focus) + + def publish_images(self, processed_images, data_header): """ - reconstruct 3d LIDAR-Data and calculate 2D Pixel - according to Camera-World + Publishes distance images for various directions as ROS image messages. + + :param processed_images: Dictionary with directions ("Center", "Back", etc.) + as keys and image arrays as values. + :param data_header: Header for the ROS image messages. + """ + # Process only valid NumPy arrays + for direction, image_array in processed_images.items(): + if not isinstance(image_array, np.ndarray): + continue + + # Convert the image into a ROS image message + dist_array_msg = self.bridge.cv2_to_imgmsg( + image_array, encoding="passthrough" + ) + dist_array_msg.header = data_header - Args: - coordinates_xyz (np.array): filtered lidar points - focus (String): Camera Angle + if direction == "Center": + self.dist_array_center_publisher.publish(dist_array_msg) + if direction == "Back": + self.dist_array_back_publisher.publish(dist_array_msg) + if direction == "Left": + self.dist_array_left_publisher.publish(dist_array_msg) + if direction == "Right": + self.dist_array_right_publisher.publish(dist_array_msg) - Returns: - image: depth image for camera angle + def reconstruct_img_from_lidar(self, coordinates_xyz, focus): + """ + Reconstructs a 2D image from 3D LiDAR data for a given camera perspective. + + :param coordinates_xyz: 3D coordinates of the filtered LiDAR points. + :param focus: Camera view (e.g., "Center", "Back"). + :return: Reconstructed image as a 2D array. 
""" - # intrinsic matrix for camera: - # width -> 300, height -> 200, fov -> 100 (agent.py) + # Create the intrinsic camera matrix based on image parameters (FOV, resolution) im = np.identity(3) - im[0, 2] = 1280 / 2.0 - im[1, 2] = 720 / 2.0 - im[0, 0] = im[1, 1] = 1280 / (2.0 * np.tan(100 * np.pi / 360.0)) + im[0, 2] = 1280 / 2.0 # x-offset (image center) + im[1, 2] = 720 / 2.0 # y-offset (image center) + im[0, 0] = im[1, 1] = 1280 / ( + 2.0 * np.tan(100 * np.pi / 360.0) + ) # Scale factor based on FOV - # extrinsic matrix for camera + # Create the extrinsic camera matrix (identity matrix for no transformation) ex = np.zeros(shape=(3, 4)) - ex[0][0] = 1 - ex[1][1] = 1 - ex[2][2] = 1 - m = np.matmul(im, ex) + ex[0][0] = ex[1][1] = ex[2][2] = 1 + m = np.matmul(im, ex) # Combine intrinsic and extrinsic matrices - # reconstruct camera image with LIDAR-Data + # Initialize empty images for reconstruction img = np.zeros(shape=(720, 1280), dtype=np.float32) dist_array = np.zeros(shape=(720, 1280, 3), dtype=np.float32) - for c in coordinates_xyz: - # center depth image - if focus == "Center": - point = np.array([c[1], c[2], c[0], 1]) - pixel = np.matmul(m, point) - x, y = int(pixel[0] / pixel[2]), int(pixel[1] / pixel[2]) - if x >= 0 and x <= 1280 and y >= 0 and y <= 720: - img[719 - y][1279 - x] = c[0] - dist_array[719 - y][1279 - x] = np.array( - [c[0], c[1], c[2]], dtype=np.float32 - ) - - # back depth image - if focus == "Back": - point = np.array([c[1], c[2], c[0], 1]) - pixel = np.matmul(m, point) - x, y = int(pixel[0] / pixel[2]), int(pixel[1] / pixel[2]) - if x >= 0 and x <= 1280 and y >= 0 and y < 720: - img[y][1279 - x] = -c[0] - dist_array[y][1279 - x] = np.array( - [-c[0], c[1], c[2]], dtype=np.float32 - ) - - # left depth image - if focus == "Left": - point = np.array([c[0], c[2], c[1], 1]) - pixel = np.matmul(m, point) - x, y = int(pixel[0] / pixel[2]), int(pixel[1] / pixel[2]) - if x >= 0 and x <= 1280 and y >= 0 and y <= 720: - img[719 - y][1279 - x] = c[1] - dist_array[y][1279 - x] = np.array( - [c[0], c[1], c[2]], dtype=np.float32 - ) - - # right depth image - if focus == "Right": - point = np.array([c[0], c[2], c[1], 1]) - pixel = np.matmul(m, point) - x, y = int(pixel[0] / pixel[2]), int(pixel[1] / pixel[2]) - if x >= 0 and x < 1280 and y >= 0 and y < 720: - img[y][1279 - x] = -c[1] - dist_array[y][1279 - x] = np.array( - [c[0], c[1], c[2]], dtype=np.float32 - ) + + # Prepare points based on focus + if focus in ["Center", "Back"]: + points = np.column_stack( + ( + coordinates_xyz[:, 1], + coordinates_xyz[:, 2], + coordinates_xyz[:, 0], + np.ones(coordinates_xyz.shape[0]), + ) + ) + elif focus in ["Left", "Right"]: + points = np.column_stack( + ( + coordinates_xyz[:, 0], + coordinates_xyz[:, 2], + coordinates_xyz[:, 1], + np.ones(coordinates_xyz.shape[0]), + ) + ) + else: + rospy.logwarn(f"Unknown focus: {focus}. 
Skipping image calculation.") + return None + + # Project 3D points to 2D image coordinates + pixels = np.dot(m, points.T).T + x = (pixels[:, 0] / pixels[:, 2]).astype(int) + y = (pixels[:, 1] / pixels[:, 2]).astype(int) + + # Filter valid coordinates + valid_indices = (x >= 0) & (x < 1280) & (y >= 0) & (y < 720) + x = x[valid_indices] + y = y[valid_indices] + valid_coordinates = coordinates_xyz[valid_indices] + + if focus == "Center": + img[719 - y, 1279 - x] = valid_coordinates[:, 0] + dist_array[719 - y, 1279 - x] = valid_coordinates + elif focus == "Back": + img[y, 1279 - x] = -valid_coordinates[:, 0] + dist_array[y, 1279 - x] = np.column_stack( + ( + -valid_coordinates[:, 0], + valid_coordinates[:, 1], + valid_coordinates[:, 2], + ) + ) + elif focus == "Left": + img[719 - y, 1279 - x] = valid_coordinates[:, 1] + dist_array[719 - y, 1279 - x] = valid_coordinates + elif focus == "Right": + img[y, 1279 - x] = -valid_coordinates[:, 1] + dist_array[y, 1279 - x] = np.column_stack( + ( + valid_coordinates[:, 0], + -valid_coordinates[:, 1], + valid_coordinates[:, 2], + ) + ) return dist_array +def array_to_pointcloud2(points, header="hero/Lidar"): + """ + Converts an array of points into a ROS PointCloud2 message. + + :param points: Array of points with [x, y, z] coordinates. + :param header: Header information for the ROS PointCloud2 message. + :return: ROS PointCloud2 message. + """ + # Ensure the input is a NumPy array + points_array = np.array(points) + + # Convert the points into a structured array with fields "x", "y", "z" + points_structured = np.array( + [(p[0], p[1], p[2]) for p in points_array], + dtype=[("x", "f4"), ("y", "f4"), ("z", "f4")], + ) + + # Create a PointCloud2 message from the structured array + pointcloud_msg = ros_numpy.point_cloud2.array_to_pointcloud2(points_structured) + + # Set the timestamp and header for the message + pointcloud_msg.header.stamp = rospy.Time.now() + pointcloud_msg.header = header + + return pointcloud_msg + + +def combine_clusters(cluster_buffer): + """ + Combines clusters from multiple point clouds into a structured NumPy array. + + :param cluster_buffer: List of dictionaries containing cluster IDs and point clouds. + :return: Combined structured NumPy array with fields "x", "y", "z", "cluster_id". + """ + points_list = [] + cluster_ids_list = [] + + for clustered_points in cluster_buffer: + for cluster_id, points in clustered_points.items(): + if points.size > 0: # Ignore empty clusters + points_list.append(points) + # Create an array with the cluster ID for all points in the cluster + cluster_ids_list.append( + np.full(points.shape[0], cluster_id, dtype=np.float32) + ) + + if not points_list: # If no points are present + return np.array( + [], dtype=[("x", "f4"), ("y", "f4"), ("z", "f4"), ("cluster_id", "f4")] + ) + + # Combine all points and cluster IDs into two separate arrays + all_points = np.vstack(points_list) + all_cluster_ids = np.concatenate(cluster_ids_list) + + # Create a structured array for the combined data + combined_points = np.zeros( + all_points.shape[0], + dtype=[("x", "f4"), ("y", "f4"), ("z", "f4"), ("cluster_id", "f4")], + ) + combined_points["x"] = all_points[:, 0] + combined_points["y"] = all_points[:, 1] + combined_points["z"] = all_points[:, 2] + combined_points["cluster_id"] = all_cluster_ids + + return combined_points + + +def cluster_lidar_data_from_pointcloud(coordinates, eps=0.3, min_samples=10): + """ + Performs clustering on LiDAR data using DBSCAN and returns the clusters. 
+ + :param coordinates: LiDAR point cloud as a NumPy array with "x", "y", "z". + :param eps: Maximum distance between points to group them into a cluster. + :param min_samples: Minimum number of points required to form a cluster. + :return: Dictionary with cluster IDs and their corresponding point clouds. + """ + if coordinates.shape[0] == 0: + rospy.logerr("The input array 'coordinates' is empty.") + return {} + + # Extract x, y, and z from the coordinates for DBSCAN clustering + xyz = np.column_stack((coordinates["x"], coordinates["y"], coordinates["z"])) + + if xyz.shape[0] == 0: + rospy.logwarn("No data points available for DBSCAN. Skipping clustering.") + return {} + + # Apply DBSCAN to compute cluster labels for the point cloud + clustering = DBSCAN(eps=eps, min_samples=min_samples).fit(xyz) + labels = clustering.labels_ + + # Remove noise (cluster ID: -1) and identify valid cluster IDs + unique_labels = np.unique(labels) + valid_labels = unique_labels[unique_labels != -1] + + # Create a dictionary with cluster IDs and their corresponding points + clusters = Parallel(n_jobs=-1)( + delayed(lambda cluster_label: (cluster_label, xyz[labels == cluster_label]))( + label + ) + for label in valid_labels + ) + + clusters = dict(clusters) + + return clusters + + if __name__ == "__main__": + """ + Initializes the LidarDistance class and starts the listener method. + """ lidar_distance = LidarDistance() lidar_distance.listener() diff --git a/code/perception/src/radar_node.py b/code/perception/src/radar_node.py index 4ddf5b4d..fcb32ddd 100755 --- a/code/perception/src/radar_node.py +++ b/code/perception/src/radar_node.py @@ -2,10 +2,16 @@ import rospy import ros_numpy import numpy as np -from std_msgs.msg import String -from sensor_msgs.msg import PointCloud2 +from std_msgs.msg import String, Header +from sensor_msgs.msg import PointCloud2, PointField from sklearn.cluster import DBSCAN +from sklearn.preprocessing import StandardScaler import json +from sensor_msgs import point_cloud2 +from visualization_msgs.msg import Marker, MarkerArray +from geometry_msgs.msg import Point + +import struct class RadarNode: @@ -21,9 +27,42 @@ def callback(self, data): Args: data: Point2Cloud message containing radar data """ - clustered_points = cluster_radar_data_from_pointcloud(data, 10) - clustered_points_json = json.dumps(clustered_points) - self.dist_array_radar_publisher.publish(clustered_points_json) + + dataarray = pointcloud2_to_array(data) + + # radar position z=0.7 + dataarray = filter_data(dataarray, min_z=-0.6) + + clustered_data = cluster_data(dataarray) + + # transformed_data = transform_data_to_2d(dataarray) + + cloud = create_pointcloud2(dataarray, clustered_data.labels_) + self.visualization_radar_publisher.publish(cloud) + + points_with_labels = np.hstack( + (dataarray, clustered_data.labels_.reshape(-1, 1)) + ) + bounding_boxes = generate_bounding_boxes(points_with_labels) + + marker_array = MarkerArray() + for label, bbox in bounding_boxes: + if label != -1: + marker = create_bounding_box_marker(label, bbox) + marker_array.markers.append(marker) + # can be used for extra debugging + # min_marker, max_marker = create_min_max_markers(label, bbox) + # marker_array.markers.append(min_marker) + # marker_array.markers.append(max_marker) + + marker_array = clear_old_markers(marker_array, max_id=len(bounding_boxes) - 1) + + self.marker_visualization_radar_publisher.publish(marker_array) + + cluster_info = generate_cluster_info( + clustered_data, dataarray, marker_array, bounding_boxes + ) +
self.cluster_info_radar_publisher.publish(cluster_info) def listener(self): """Initializes the node and its publishers.""" @@ -35,6 +74,21 @@ def listener(self): String, queue_size=10, ) + self.visualization_radar_publisher = rospy.Publisher( + rospy.get_param("~visualisation_topic", "/paf/hero/Radar/Visualization"), + PointCloud2, + queue_size=10, + ) + self.marker_visualization_radar_publisher = rospy.Publisher( + rospy.get_param("~marker_topic", "/paf/hero/Radar/Marker"), + MarkerArray, + queue_size=10, + ) + self.cluster_info_radar_publisher = rospy.Publisher( + rospy.get_param("~clusterInfo_topic_topic", "/paf/hero/Radar/ClusterInfo"), + String, + queue_size=10, + ) rospy.Subscriber( rospy.get_param("~source_topic", "/carla/hero/RADAR"), PointCloud2, @@ -55,60 +109,418 @@ def pointcloud2_to_array(pointcloud_msg): Returns: - np.ndarray A 2D array where each row corresponds to a point in the point cloud: - [x, y, z, distance], where "distance" is the distance from the origin. + [x, y, z, Velocity] """ cloud_array = ros_numpy.point_cloud2.pointcloud2_to_array(pointcloud_msg) - distances = np.sqrt( - cloud_array["x"] ** 2 + cloud_array["y"] ** 2 + cloud_array["z"] ** 2 - ) return np.column_stack( - (cloud_array["x"], cloud_array["y"], cloud_array["z"], distances) + (cloud_array["x"], cloud_array["y"], cloud_array["z"], cloud_array["Velocity"]) ) -def cluster_radar_data_from_pointcloud( - pointcloud_msg, max_distance, eps=1.0, min_samples=2 +def filter_data( + data, + min_x=-100, + max_x=100, + min_y=-100, + max_y=100, + min_z=-1, + max_z=100, + max_distance=100, ): """ - Filters and clusters points from a ROS PointCloud2 message based on DBSCAN - clustering. + Filters radar data based on specified spatial and distance constraints. - Parameters: - - pointcloud_msg: sensor_msgs/PointCloud2 - The ROS PointCloud2 message containing the 3D points. - - max_distance: float - Maximum distance to consider points. Points beyond this distance are - discarded. - - eps: float, optional (default: 1.0) - The maximum distance between two points for them to be considered in - the same cluster. - - min_samples: int, optional (default: 2) - The minimum number of points required to form a cluster. + This function applies multiple filtering criteria to the input radar data. + Points outside these bounds are excluded from the output. + + Args: + data (np.ndarray): A 2D numpy array containing radar data, where each row + represents a data point with the format [x, y, z, distance]. The array + shape is (N, 4), where N is the number of points. + min_x (float, optional): Minimum value for the x-coordinate. Default is -100. + max_x (float, optional): Maximum value for the x-coordinate. Default is 100. + min_y (float, optional): Minimum value for the y-coordinate. Default is -100. + max_y (float, optional): Maximum value for the y-coordinate. Default is 100. + min_z (float, optional): Minimum value for the z-coordinate. Default is -1. + max_z (float, optional): Maximum value for the z-coordinate. Default is 100. + max_distance (float, optional): Maximum allowable distance of the point from + the sensor. Default is 100. Returns: - - dict - A dictionary where the keys are cluster labels (int) and the values - are the number of points in each cluster. Returns an empty dictionary - if no points are available. + np.ndarray: A numpy array containing only the filtered data points that meet + the specified criteria. 
""" - data = pointcloud2_to_array(pointcloud_msg) + filtered_data = data[data[:, 3] < max_distance] filtered_data = filtered_data[ - (filtered_data[:, 1] >= -1) - & (filtered_data[:, 1] <= 1) - & (filtered_data[:, 2] <= 1.3) - & (filtered_data[:, 2] >= -0.7) + (filtered_data[:, 0] >= min_x) + & (filtered_data[:, 0] <= max_x) + & (filtered_data[:, 1] >= min_y) + & (filtered_data[:, 1] <= max_y) + & (filtered_data[:, 2] <= max_z) + & (filtered_data[:, 2] >= min_z) ] - if len(filtered_data) == 0: + return filtered_data + + +def cluster_data(data, eps=0.8, min_samples=3): + """ + Clusters the radar data using the DBSCAN algorithm + + Args: + data (np.ndarray): data array which should be clustered + eps (float, optional): maximum distance of points. Defaults to 0.8. + min_samples (int, optional): min samples for 1 cluster. Defaults to 3. + + Returns: + dict: A dictionary where the keys are cluster labels (int) and the values + are the number of points in each cluster. Returns an empty dictionary + if no points are available. + DBSCAN: A DBSCAN clustering object containing labels and core sample indices + """ + + if len(data) == 0: return {} - coordinates = filtered_data[:, :2] - clustering = DBSCAN(eps=eps, min_samples=min_samples).fit(coordinates) - labels = clustering.labels_ - clustered_points = {label: list(labels).count(label) for label in set(labels)} - clustered_points = {int(label): count for label, count in clustered_points.items()} + scaler = StandardScaler() + data_scaled = scaler.fit_transform(data) + clustered_points = DBSCAN(eps=eps, min_samples=min_samples).fit(data_scaled) + return clustered_points +# generates random color for cluster +def generate_color_map(num_clusters): + np.random.seed(42) + colors = np.random.randint(0, 255, size=(num_clusters, 3)) + return colors + + +def create_pointcloud2(clustered_points, cluster_labels): + """_summary_ + + Args: + clustered_points (dict): clustered points after dbscan + cluster_labels (_type_): _description_ + + Returns: + PointCloud2: pointcloud which can be published + """ + header = Header() + header.stamp = rospy.Time.now() + header.frame_id = "hero/RADAR" + + points = [] + unique_labels = np.unique(cluster_labels) + colors = generate_color_map(len(unique_labels)) + + for i, point in enumerate(clustered_points): + x, y, z, v = point + label = cluster_labels[i] + + if label == -1: + r, g, b = 128, 128, 128 + else: + r, g, b = colors[label] + + rgb = struct.unpack("f", struct.pack("I", (r << 16) | (g << 8) | b))[0] + points.append([x, y, z, rgb]) + + fields = [ + PointField("x", 0, PointField.FLOAT32, 1), + PointField("y", 4, PointField.FLOAT32, 1), + PointField("z", 8, PointField.FLOAT32, 1), + PointField("rgb", 12, PointField.FLOAT32, 1), + ] + + return point_cloud2.create_cloud(header, fields, points) + + +def transform_data_to_2d(clustered_data): + """_summary_ + + Args: + clustered_data (np.ndarray): clustered 3d data points + + Returns: + _np.ndarray: clustered points, every z value is set to 0 + """ + + transformed_points = clustered_data + transformed_points[:, 0] = clustered_data[:, 0] + transformed_points[:, 1] = clustered_data[:, 1] + transformed_points[:, 2] = 0 + transformed_points[:, 3] = clustered_data[:, 3] + + return transformed_points + + +def calculate_aabb(cluster_points): + """ + Calculates the axis-aligned bounding box (AABB) for a set of 3D points. 
+ + This function computes the minimum and maximum values along each axis (x, y, z) + for a given set of 3D points, which defines the bounding box that contains + all points in the cluster. + + Args: + cluster_points (numpy.ndarray): + A 2D array where each row represents a 3D point (x, y, z). + The array should have shape (N, 3) where N is the number of points. + + Returns: + tuple: A tuple of the form (x_min, x_max, y_min, y_max, z_min, z_max), + which represents the axis-aligned bounding box (AABB) for the given + set of points. The values are the minimum and maximum coordinates + along the x, y, and z axes. + """ + + # for 2d (top-down) boxes + # x_min = np.min(cluster_points[:, 0]) + # x_max = np.max(cluster_points[:, 0]) + # y_min = np.min(cluster_points[:, 1]) + # y_max = np.max(cluster_points[:, 1]) + # rospy.loginfo(f"Bounding box: X({x_min}, {x_max}), Y({y_min}, {y_max})") + # return x_min, x_max, y_min, y_max + + # for 3d boxes + x_min = np.min(cluster_points[:, 0]) + x_max = np.max(cluster_points[:, 0]) + y_min = np.min(cluster_points[:, 1]) + y_max = np.max(cluster_points[:, 1]) + z_min = np.min(cluster_points[:, 2]) + z_max = np.max(cluster_points[:, 2]) + return x_min, x_max, y_min, y_max, z_min, z_max + + +def generate_bounding_boxes(points_with_labels): + """ + Generates bounding boxes for clustered points. + + This function processes a set of points, each associated with a cluster label, + and generates an axis-aligned bounding box (AABB) for each unique cluster label. + + Args: + points_with_labels (numpy.ndarray): + A 2D array of shape (N, 4) where each row contains + the coordinates (x, y, z) of a point along with its + corresponding cluster label in the last column. + The array should have the structure [x, y, z, label]. + + Returns: + list: A list of tuples, where each tuple contains a cluster label and the + corresponding bounding box (bbox). The bbox is represented by a tuple + of the form (x_min, x_max, y_min, y_max, z_min, z_max). + """ + bounding_boxes = [] + unique_labels = np.unique(points_with_labels[:, -1]) + for label in unique_labels: + if label == -1: + continue + cluster_points = points_with_labels[points_with_labels[:, -1] == label, :3] + bbox = calculate_aabb(cluster_points) + bounding_boxes.append((label, bbox)) + return bounding_boxes + + +def create_bounding_box_marker(label, bbox): + """ + Creates an RViz Marker for visualizing a 3D bounding box. + + This function generates a Marker object for RViz to visualize a 3D bounding box + based on the provided label and bounding box dimensions. The marker is + represented as a series of lines connecting the corners of the box. + + Args: + label (int): The unique identifier for the cluster or object to which the + bounding box belongs. This label is used as the Marker ID. + bbox (tuple): A tuple containing the min and max coordinates of the bounding box + in the format (x_min, x_max, y_min, y_max, z_min, z_max). + + Returns: + Marker: A Marker object that can be published to RViz to display the + 3D bounding box. The marker is of type LINE_LIST, + representing the edges of the bounding box. 
+ """ + # for 2d (top-down) boxes + # x_min, x_max, y_min, y_max = bbox + + # for 3d boxes + x_min, x_max, y_min, y_max, z_min, z_max = bbox + + marker = Marker() + marker.header.frame_id = "hero/RADAR" + marker.id = int(label) + # marker.type = Marker.LINE_STRIP # 2d boxes + marker.type = Marker.LINE_LIST # 3d boxes + marker.action = Marker.ADD + marker.scale.x = 0.1 + marker.color.r = 1.0 + marker.color.g = 1.0 + marker.color.b = 0.0 + marker.color.a = 1.0 + + # for 2d (top-down) boxes + # points = [ + # Point(x_min, y_min, 0), + # Point(x_max, y_min, 0), + # Point(x_max, y_max, 0), + # Point(x_min, y_max, 0), + # Point(x_min, y_min, 0), + # ] + # marker.points = points + + # for 3d boxes + points = [ + Point(x_min, y_min, z_min), + Point(x_max, y_min, z_min), + Point(x_max, y_max, z_min), + Point(x_min, y_max, z_min), + Point(x_min, y_min, z_max), + Point(x_max, y_min, z_max), + Point(x_max, y_max, z_max), + Point(x_min, y_max, z_max), + ] + + lines = [ + (0, 1), + (1, 2), + (2, 3), + (3, 0), # Bottom + (4, 5), + (5, 6), + (6, 7), + (7, 4), # Top + (0, 4), + (1, 5), + (2, 6), + (3, 7), # Vertical Edges + ] + for start, end in lines: + marker.points.append(points[start]) + marker.points.append(points[end]) + + return marker + + +# can be used for extra debugging +def create_min_max_markers( + label, + bbox, + frame_id="hero/RADAR", + min_color=(0.0, 1.0, 0.0, 1.0), + max_color=(1.0, 0.0, 0.0, 1.0), +): + """ + creates RViz-Markers for min- and max-points of a bounding box. + + Args: + label (int): cluster-id (used as marker-ID in rviz). + bbox (tuple): min- and max-values of bounding box + (x_min, x_max, y_min, y_max, z_min, z_max). + frame_id (str): frame ID for markers + min_color (tuple): RGBA-value for min-point-marker + max_color (tuple): RGBA-value for max-point-marker + + Returns: + tuple: pair of markers (min_marker, max_marker). + """ + x_min, x_max, y_min, y_max, z_min, z_max = bbox + + # min-point-marker + min_marker = Marker() + min_marker.header.frame_id = frame_id + min_marker.id = int(label * 10) + min_marker.type = Marker.SPHERE + min_marker.action = Marker.ADD + min_marker.scale.x = 0.2 + min_marker.scale.y = 0.2 + min_marker.scale.z = 0.2 + min_marker.color.r = min_color[0] + min_marker.color.g = min_color[1] + min_marker.color.b = min_color[2] + min_marker.color.a = min_color[3] + min_marker.pose.position.x = x_min + min_marker.pose.position.y = y_min + min_marker.pose.position.z = z_min + + # max-point-marker + max_marker = Marker() + max_marker.header.frame_id = frame_id + max_marker.id = int(label * 10 + 1) + max_marker.type = Marker.SPHERE + max_marker.action = Marker.ADD + max_marker.scale.x = 0.2 + max_marker.scale.y = 0.2 + max_marker.scale.z = 0.2 + max_marker.color.r = max_color[0] + max_marker.color.g = max_color[1] + max_marker.color.b = max_color[2] + max_marker.color.a = max_color[3] + max_marker.pose.position.x = x_max + max_marker.pose.position.y = y_max + max_marker.pose.position.z = z_max + + return min_marker, max_marker + + +def clear_old_markers(marker_array, max_id): + """ + Removes old markers from the given MarkerArray by setting the action + to DELETE for markers with an ID greater than or equal to max_id. + + Args: + marker_array (MarkerArray): The current MarkerArray containing all markers. + max_id (int): The highest ID of the new markers. Markers with an ID + greater than or equal to this value will be marked for deletion. + + Returns: + MarkerArray: The updated MarkerArray with old markers removed. 
+ """ + for marker in marker_array.markers: + if marker.id > max_id: + marker.action = Marker.DELETE + return marker_array + + +# generates string with label-id and cluster size, can be used for extra debugging +def generate_cluster_info(clusters, data, marker_array, bounding_boxes): + """ + Generates information about clusters, including the label, number of points, + markers, and bounding boxes. + + Args: + clusters (DBSCAN): The clustered data, containing the labels for each point. + data (numpy.ndarray): + The point cloud data, typically with columns [x, y, z, distance]. + marker_array (MarkerArray): + The array of RViz markers associated with the clusters. + bounding_boxes (list): The list of bounding boxes for each detected object. + + Returns: + str: A JSON string containing the information about each cluster, including: + - "label": The cluster label. + - "points_count": The number of points in the cluster. + - "Anzahl marker": The number of markers in the MarkerArray. + - "Anzahl Boundingboxen": The number of bounding boxes. + """ + cluster_info = [] + + for label in set(clusters.labels_): + cluster_points = data[clusters.labels_ == label] + cluster_size = len(cluster_points) + if label != -1: + cluster_info.append( + { + "label": int(label), + "points_count": cluster_size, + "num_marker": len(marker_array.markers), + "num_bounding_boxes": len(bounding_boxes), + } + ) + + return json.dumps(cluster_info) + + if __name__ == "__main__": radar_node = RadarNode() radar_node.listener() diff --git a/code/perception/src/vision_node.py b/code/perception/src/vision_node.py index 7039b9f3..1a558df2 100755 --- a/code/perception/src/vision_node.py +++ b/code/perception/src/vision_node.py @@ -15,6 +15,7 @@ ) import torchvision.transforms as t import cv2 +from vision_node_helper import get_carla_class_name, get_carla_color from rospy.numpy_msg import numpy_msg from sensor_msgs.msg import Image as ImageMsg from std_msgs.msg import Header, Float32MultiArray @@ -24,6 +25,7 @@ from ultralytics import NAS, YOLO, RTDETR, SAM, FastSAM import asyncio import rospy +from ultralytics.utils.ops import scale_masks class VisionNode(CompatibleNode): @@ -77,6 +79,10 @@ def __init__(self, name, **kwargs): "yolov8x-seg": (YOLO, "yolov8x-seg.pt", "segmentation", "ultralytics"), "sam_l": (SAM, "sam_l.pt", "detection", "ultralytics"), "FastSAM-x": (FastSAM, "FastSAM-x.pt", "detection", "ultralytics"), + "yolo11n-seg": (YOLO, "yolo11n-seg.pt", "segmentation", "ultralytics"), + "yolo11s-seg": (YOLO, "yolo11s-seg.pt", "segmentation", "ultralytics"), + "yolo11m-seg": (YOLO, "yolo11m-seg.pt", "segmentation", "ultralytics"), + "yolo11l-seg": (YOLO, "yolo11l-seg.pt", "segmentation", "ultralytics"), } # general setup @@ -239,7 +245,7 @@ def handle_camera_image(self, image): vision_result = self.predict_ultralytics(image) # publish vision result to rviz - img_msg = self.bridge.cv2_to_imgmsg(vision_result, encoding="rgb8") + img_msg = self.bridge.cv2_to_imgmsg(vision_result, encoding="bgr8") img_msg.header = image.header # publish img to corresponding angle topic @@ -341,7 +347,7 @@ def predict_ultralytics(self, image): cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR) # run model prediction - output = self.model(cv_image, half=True, verbose=False) + output = self.model.track(cv_image, half=True, verbose=False, imgsz=640) # handle distance of objects @@ -349,6 +355,11 @@ def predict_ultralytics(self, image): distance_output = [] c_boxes = [] c_labels = [] + c_colors = [] + if hasattr(output[0], "masks") and output[0].masks 
is not None: + masks = output[0].masks.data + else: + masks = None boxes = output[0].boxes for box in boxes: @@ -358,72 +369,74 @@ def predict_ultralytics(self, image): # only run distance calc when dist_array is available # this if is needed because the lidar starts # publishing with a delay - if self.dist_arrays is not None: - - # crop bounding box area out of depth image - distances = np.asarray( - self.dist_arrays[ - int(pixels[1]) : int(pixels[3]) : 1, - int(pixels[0]) : int(pixels[2]) : 1, - ::, - ] - ) + if self.dist_arrays is None: + continue + + # crop bounding box area out of depth image + distances = np.asarray( + self.dist_arrays[ + int(pixels[1]) : int(pixels[3]) : 1, + int(pixels[0]) : int(pixels[2]) : 1, + ::, + ] + ) - # set all 0 (black) values to np.inf (necessary if - # you want to search for minimum) - # these are all pixels where there is no - # corresponding lidar point in the depth image - condition = distances[:, :, 0] != 0 - non_zero_filter = distances[condition] - distances_copy = distances.copy() - distances_copy[distances_copy == 0] = np.inf - - # only proceed if there is more than one lidar - # point in the bounding box - if len(non_zero_filter) > 0: - - """ - !Watch out: - The calculation of min x and min abs y is currently - only for center angle - For back, left and right the values are different in the - coordinate system of the lidar. - (Example: the closedt distance on the back view should the - max x since the back view is on the -x axis) - """ - - # copy actual lidar points - obj_dist_min_x = self.min_x(dist_array=distances_copy) - obj_dist_min_abs_y = self.min_abs_y(dist_array=distances_copy) - - # absolut distance to object for visualization - abs_distance = np.sqrt( - obj_dist_min_x[0] ** 2 - + obj_dist_min_x[1] ** 2 - + obj_dist_min_x[2] ** 2 - ) - - # append class index, min x and min abs y to output array - distance_output.append(float(cls)) - distance_output.append(float(obj_dist_min_x[0])) - distance_output.append(float(obj_dist_min_abs_y[1])) - - else: - # fallback values for bounding box if - # no lidar points where found - obj_dist_min_x = (np.inf, np.inf, np.inf) - obj_dist_min_abs_y = (np.inf, np.inf, np.inf) - abs_distance = np.inf - - # add values for visualization - c_boxes.append(torch.tensor(pixels)) - c_labels.append( - f"Class: {cls}," - f"Meters: {round(abs_distance, 2)}," - f"({round(float(obj_dist_min_x[0]), 2)}," - f"{round(float(obj_dist_min_abs_y[1]), 2)})" + # set all 0 (black) values to np.inf (necessary if + # you want to search for minimum) + # these are all pixels where there is no + # corresponding lidar point in the depth image + condition = distances[:, :, 0] != 0 + non_zero_filter = distances[condition] + distances_copy = distances.copy() + distances_copy[distances_copy == 0] = np.inf + + # only proceed if there is more than one lidar + # point in the bounding box + if len(non_zero_filter) > 0: + """ + !Watch out: + The calculation of min x and min abs y is currently + only for center angle + For back, left and right the values are different in the + coordinate system of the lidar. 
+ (Example: the closest distance on the back view should be the + max x since the back view is on the -x axis) + """ + + # copy actual lidar points + obj_dist_min_x = self.min_x(dist_array=distances_copy) + obj_dist_min_abs_y = self.min_abs_y(dist_array=distances_copy) + + # absolute distance to object for visualization + abs_distance = np.sqrt( + obj_dist_min_x[0] ** 2 + + obj_dist_min_x[1] ** 2 + + obj_dist_min_x[2] ** 2 ) + # append class index, min x and min abs y to output array + distance_output.append(float(cls)) + distance_output.append(float(obj_dist_min_x[0])) + distance_output.append(float(obj_dist_min_abs_y[1])) + + else: + # fallback values for bounding box if + # no lidar points were found + obj_dist_min_x = (np.inf, np.inf, np.inf) + obj_dist_min_abs_y = (np.inf, np.inf, np.inf) + abs_distance = np.inf + + # add values for visualization + c_boxes.append(torch.tensor(pixels)) + c_labels.append( + f"Class: {get_carla_class_name(cls)}, " + f"Meters: {round(abs_distance, 2)}, " + f"TrackingId: {int(box.id)}, " + f"({round(float(obj_dist_min_x[0]), 2)}, " + f"{round(float(obj_dist_min_abs_y[1]), 2)})", + ) + c_colors.append(get_carla_color(int(cls))) + # publish list of distances of objects for planning self.distance_publisher.publish(Float32MultiArray(data=distance_output)) @@ -437,7 +450,7 @@ def predict_ultralytics(self, image): # draw bounding boxes and distance values on image c_boxes = torch.stack(c_boxes) - box = draw_bounding_boxes( + drawn_images = draw_bounding_boxes( image_np_with_detections, c_boxes, c_labels, @@ -445,9 +458,21 @@ width=3, font_size=12, ) - np_box_img = np.transpose(box.detach().numpy(), (1, 2, 0)) - box_img = cv2.cvtColor(np_box_img, cv2.COLOR_BGR2RGB) - return box_img + if masks is not None: + scaled_masks = np.squeeze( + scale_masks(masks.unsqueeze(1), cv_image.shape[:2], True).cpu().numpy(), + 1, + ) + + drawn_images = draw_segmentation_masks( + drawn_images, + torch.from_numpy(scaled_masks > 0), + alpha=0.6, + colors=c_colors, + ) + + np_image = np.transpose(drawn_images.detach().numpy(), (1, 2, 0)) + return cv2.cvtColor(np_image, cv2.COLOR_BGR2RGB) def min_x(self, dist_array): """ diff --git a/code/perception/src/vision_node_helper.py b/code/perception/src/vision_node_helper.py new file mode 100644 index 00000000..6778f7b6 --- /dev/null +++ b/code/perception/src/vision_node_helper.py @@ -0,0 +1,158 @@ +from typing import List, Union + + +# Carla colors +carla_colors = [ + [0, 0, 0], # 0: None + [70, 70, 70], # 1: Buildings + [190, 153, 153], # 2: Fences + [72, 0, 90], # 3: Other + [220, 20, 60], # 4: Pedestrians + [153, 153, 153], # 5: Poles + [157, 234, 50], # 6: RoadLines + [128, 64, 128], # 7: Roads + [244, 35, 232], # 8: Sidewalks + [107, 142, 35], # 9: Vegetation + [0, 0, 255], # 10: Vehicles + [102, 102, 156], # 11: Walls + [220, 220, 0], # 12: TrafficSigns +] + +carla_class_names = [ + "None", # 0 + "Buildings", # 1 + "Fences", # 2 + "Other", # 3 + "Pedestrians", # 4 + "Poles", # 5 + "RoadLines", # 6 + "Roads", # 7 + "Sidewalks", # 8 + "Vegetation", # 9 + "Vehicles", # 10 + "Walls", # 11 + "TrafficSigns", # 12 +] + +# Mapping from COCO classes to Carla classes +coco_to_carla = [ + 4, # 0: Person -> Pedestrians + 10, # 1: Bicycle -> Vehicles + 10, # 2: Car -> Vehicles + 10, # 3: Motorbike -> Vehicles + 10, # 4: Airplane -> Vehicles + 10, # 5: Bus -> Vehicles + 10, # 6: Train -> Vehicles + 10, # 7: Truck -> Vehicles + 10, # 8: Boat -> Vehicles + 12, # 9: Traffic Light -> TrafficSigns + 3, # 10: Fire Hydrant ->
Other + 12, # 11: Stop Sign -> TrafficSigns + 3, # 12: Parking Meter -> Other + 3, # 13: Bench -> Other + 3, # 14: Bird -> Other + 3, # 15: Cat -> Other + 3, # 16: Dog -> Other + 3, # 17: Horse -> Other + 3, # 18: Sheep -> Other + 3, # 19: Cow -> Other + 3, # 20: Elephant -> Other + 3, # 21: Bear -> Other + 3, # 22: Zebra -> Other + 3, # 23: Giraffe -> Other + 3, # 24: Backpack -> Other + 3, # 25: Umbrella -> Other + 3, # 26: Handbag -> Other + 3, # 27: Tie -> Other + 3, # 28: Suitcase -> Other + 3, # 29: Frisbee -> Other + 3, # 30: Skis -> Other + 3, # 31: Snowboard -> Other + 3, # 32: Sports Ball -> Other + 3, # 33: Kite -> Other + 3, # 34: Baseball Bat -> Other + 3, # 35: Baseball Glove -> Other + 3, # 36: Skateboard -> Other + 3, # 37: Surfboard -> Other + 3, # 38: Tennis Racket -> Other + 3, # 39: Bottle -> Other + 3, # 40: Wine Glass -> Other + 3, # 41: Cup -> Other + 3, # 42: Fork -> Other + 3, # 43: Knife -> Other + 3, # 44: Spoon -> Other + 3, # 45: Bowl -> Other + 3, # 46: Banana -> Other + 3, # 47: Apple -> Other + 3, # 48: Sandwich -> Other + 3, # 49: Orange -> Other + 3, # 50: Broccoli -> Other + 3, # 51: Carrot -> Other + 3, # 52: Hot Dog -> Other + 3, # 53: Pizza -> Other + 3, # 54: Donut -> Other + 3, # 55: Cake -> Other + 3, # 56: Chair -> Other + 3, # 57: Couch -> Other + 3, # 58: Potted Plant -> Other + 3, # 59: Bed -> Other + 3, # 60: Dining Table -> Other + 3, # 61: Toilet -> Other + 3, # 62: TV -> Other + 3, # 63: Laptop -> Other + 3, # 64: Mouse -> Other + 3, # 65: Remote -> Other + 3, # 66: Keyboard -> Other + 3, # 67: Cell Phone -> Other + 3, # 68: Microwave -> Other + 3, # 69: Oven -> Other + 3, # 70: Toaster -> Other + 3, # 71: Sink -> Other + 3, # 72: Refrigerator -> Other + 3, # 73: Book -> Other + 3, # 74: Clock -> Other + 3, # 75: Vase -> Other + 3, # 76: Scissors -> Other + 3, # 77: Teddy Bear -> Other + 3, # 78: Hair Drier -> Other + 3, # 79: Toothbrush -> Other +] + +COCO_CLASS_COUNT = 80 + + +def get_carla_color(coco_class: Union[int, float]) -> List[int]: + """Get the Carla color for a given COCO class. + Args: + coco_class: COCO class index (0-79) + + Returns: + RGB color values for the corresponding Carla class + + Raises: + ValueError: If coco_class is out of valid range + """ + coco_idx = int(coco_class) + if not 0 <= coco_idx < COCO_CLASS_COUNT: + raise ValueError(f"Invalid COCO class index: {coco_idx}") + carla_class = coco_to_carla[coco_idx] + return carla_colors[carla_class] + + +def get_carla_class_name(coco_class: Union[int, float]) -> str: + """Get the Carla class name for a given COCO class. + Args: + coco_class: COCO class index (0-79) + + Returns: + Name of the corresponding Carla class + + Raises: + ValueError: If coco_class is out of valid range + """ + coco_idx = int(coco_class) + + if not 0 <= coco_idx < COCO_CLASS_COUNT: + raise ValueError(f"Invalid COCO class index: {coco_idx}") + carla_class = coco_to_carla[coco_idx] + return carla_class_names[carla_class] diff --git a/code/planning/CMakeLists.txt b/code/planning/CMakeLists.txt index 34c3ccd0..d8cf2bff 100755 --- a/code/planning/CMakeLists.txt +++ b/code/planning/CMakeLists.txt @@ -13,8 +13,11 @@ project(planning) find_package(catkin REQUIRED COMPONENTS perception rospy + rosout roslaunch std_msgs + geometry_msgs + message_generation ) roslaunch_add_file_check(launch) @@ -48,16 +51,19 @@ catkin_python_setup() ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
## Generate messages in the 'msg' folder -# add_message_files( -# MinDistance.msg -# ) + add_message_files( + FILES + NavigationPoint.msg + Trajectory.msg + ) ## Generate services in the 'srv' folder -# add_service_files( -# FILES +add_service_files( + FILES # Service1.srv # Service2.srv -# ) + RequestBehaviourChange.srv +) ## Generate actions in the 'action' folder # add_action_files( @@ -67,9 +73,12 @@ catkin_python_setup() # ) ## Generate added messages and services with any dependencies listed here -# generate_messages( -# perception -# ) + generate_messages( + DEPENDENCIES + std_msgs + perception + geometry_msgs + ) ################################################ ## Declare ROS dynamic reconfigure parameters ## @@ -105,7 +114,7 @@ catkin_package( # LIBRARIES planning # CATKIN_DEPENDS other_catkin_pkg # DEPENDS system_lib - CATKIN_DEPENDS perception rospy + CATKIN_DEPENDS perception rospy message_runtime ) ########### diff --git a/code/planning/launch/planning.launch b/code/planning/launch/planning.launch index 6662d9a3..d5803ddd 100644 --- a/code/planning/launch/planning.launch +++ b/code/planning/launch/planning.launch @@ -1,7 +1,7 @@ - + @@ -35,4 +35,9 @@ + + + + + diff --git a/code/planning/msg/NavigationPoint.msg b/code/planning/msg/NavigationPoint.msg new file mode 100644 index 00000000..e9eb53c9 --- /dev/null +++ b/code/planning/msg/NavigationPoint.msg @@ -0,0 +1,3 @@ +geometry_msgs/Point position +float32 speed +uint8 behaviour \ No newline at end of file diff --git a/code/planning/msg/Trajectory.msg b/code/planning/msg/Trajectory.msg new file mode 100644 index 00000000..94060cdd --- /dev/null +++ b/code/planning/msg/Trajectory.msg @@ -0,0 +1,2 @@ +Header header +planning/NavigationPoint[] navigationPoints \ No newline at end of file diff --git a/code/planning/package.xml b/code/planning/package.xml index a321a109..daf36bc6 100755 --- a/code/planning/package.xml +++ b/code/planning/package.xml @@ -47,6 +47,9 @@ + message_generation + message_runtime + rospy roslaunch @@ -57,6 +60,7 @@ carla_msgs sensor_msgs std_msgs + geometry_msgs perception perception diff --git a/code/planning/src/BehaviourEnum.py b/code/planning/src/BehaviourEnum.py new file mode 100644 index 00000000..361b0b43 --- /dev/null +++ b/code/planning/src/BehaviourEnum.py @@ -0,0 +1,8 @@ +from enum import IntEnum + +# getattr(Behaviour, "name").value returns the ID +# Behaviour(id) returns Behaviour.NAME + + +class BehaviourEnum(IntEnum): + CRUISING = 0 diff --git a/code/planning/src/behavior_agent/RequestBehaviourChangeService.py b/code/planning/src/behavior_agent/RequestBehaviourChangeService.py new file mode 100755 index 00000000..aa058ab3 --- /dev/null +++ b/code/planning/src/behavior_agent/RequestBehaviourChangeService.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +# import BehaviourEnum +import ros_compatibility as roscomp +from ros_compatibility.node import CompatibleNode +from rospy import Subscriber, Publisher +import rospy +from planning.srv import RequestBehaviourChange, RequestBehaviourChangeResponse +from std_msgs.msg import String, Int8 + + +class RequestBehaviourChangeService(CompatibleNode): + def __init__(self): + super(RequestBehaviourChangeService, self).__init__( + "RequestBehaviourChangeService" + ) + self.role_name = self.get_param("role_name", "hero") + self.control_loop_rate = self.get_param("control_loop_rate", 1) + self.__curr_behavior = None + + self.service = rospy.Service( + "RequestBehaviourChange", + RequestBehaviourChange, + self.handle_request_behaviour_change, + ) + + 
self.behaviour_pub: Publisher = self.new_publisher( + Int8, + f"/paf/{self.role_name}/behaviour_request", + qos_profile=1, + ) + + self.curr_behavior_sub: Subscriber = self.new_subscription( + String, + f"/paf/{self.role_name}/curr_behavior", + self.__set_curr_behavior, + qos_profile=1, + ) + + self.behaviour_pub.publish(0) + rospy.spin() + + def __set_curr_behavior(self, data: String): + """ + Sets the received current behavior of the vehicle. + """ + self.__curr_behavior = data.data + + def handle_request_behaviour_change(self, req): + if ( + self.__curr_behavior == "us_unstuck" + or self.__curr_behavior == "us_stop" + or self.__curr_behavior == "us_overtake" + or self.__curr_behavior == "Cruise" + ): + self.behaviour_pub.publish(req.request) + return RequestBehaviourChangeResponse(True) + else: + return RequestBehaviourChangeResponse(False) + + def run(self): + """ + Control loop + + :return: + """ + + self.spin() + + +if __name__ == "__main__": + """ + main function starts the RequestBehaviourChangeService node + :param args: + """ + roscomp.init("RequestBehaviourChangeService") + try: + node = RequestBehaviourChangeService() + node.run() + except KeyboardInterrupt: + pass + finally: + roscomp.shutdown() diff --git a/code/planning/src/behavior_agent/behaviours/intersection.py b/code/planning/src/behavior_agent/behaviours/intersection.py index c19d3386..6c4d4470 100755 --- a/code/planning/src/behavior_agent/behaviours/intersection.py +++ b/code/planning/src/behavior_agent/behaviours/intersection.py @@ -97,7 +97,9 @@ def update(self): - Set a feedback message - return a py_trees.common.Status.[RUNNING, SUCCESS, FAILURE] Gets the current traffic light status, stop sign status - and the stop line distance + and the stop line distance. Calculates a virtual stop line and + publishes a distance to it. Slows down the car until the virtual stop line + is reached when there is a red traffic light or a stop sign. :return: py_trees.common.Status.RUNNING, if too far from intersection py_trees.common.Status.SUCCESS, if stopped in front of inter- section or entered the intersection @@ -145,7 +147,10 @@ def update(self): or (self.stop_sign_detected and not self.traffic_light_detected) ): - rospy.loginfo("slowing down!") + rospy.loginfo( + f"Intersection Approach: slowing down! 
Stop sign: " + f"{self.stop_sign_detected}, Light: {self.traffic_light_status}" + ) self.curr_behavior_pub.publish(bs.int_app_to_stop.name) # approach slowly when traffic light is green as traffic lights are @@ -164,14 +169,14 @@ def update(self): self.traffic_light_distance > 150 ): # too far - print("still approaching") + rospy.loginfo("Intersection still approaching") return py_trees.common.Status.RUNNING elif speed < convert_to_ms(2.0) and ( (self.virtual_stopline_distance < target_distance) or (self.traffic_light_distance < 150) ): # stopped - print("stopped") + rospy.loginfo("Intersection Approach: stopped") return py_trees.common.Status.SUCCESS elif ( speed > convert_to_ms(5.0) @@ -180,6 +185,10 @@ def update(self): ): # drive through intersection even if traffic light turns yellow + rospy.loginfo( + f"Intersection Approach Light is green, light:" + f"{self.traffic_light_status}" + ) return py_trees.common.Status.SUCCESS elif speed > convert_to_ms(5.0) and self.virtual_stopline_distance < 3.5: # running over line @@ -189,7 +198,7 @@ def update(self): self.virtual_stopline_distance < target_distance and not self.stopline_detected ): - rospy.loginfo("Leave intersection!") + rospy.loginfo("Intersection Approach: Leave intersection!") return py_trees.common.Status.SUCCESS else: return py_trees.common.Status.RUNNING @@ -276,7 +285,7 @@ def update(self): """ light_status_msg = self.blackboard.get("/paf/hero/Center/traffic_light_state") - # ADD FEATURE: Check if intersection is clear + # TODO: ADD FEATURE Check if intersection is clear lidar_data = None intersection_clear = True if lidar_data is not None: @@ -292,7 +301,7 @@ def update(self): # Wait at traffic light self.red_light_flag = True self.green_light_time = rospy.get_rostime() - rospy.loginfo(f"Light Status: {traffic_light_status}") + rospy.loginfo(f"Intersection Wait Light Status: {traffic_light_status}") self.curr_behavior_pub.publish(bs.int_wait.name) return py_trees.common.Status.RUNNING elif ( @@ -300,7 +309,7 @@ def update(self): and traffic_light_status == "green" ): # Wait approx 1s for confirmation - rospy.loginfo("Confirm green light!") + rospy.loginfo("Intersection Wait Confirm green light!") return py_trees.common.Status.RUNNING elif self.red_light_flag and traffic_light_status != "green": rospy.loginfo(f"Light Status: {traffic_light_status}" "-> prev was red") @@ -310,7 +319,9 @@ def update(self): rospy.get_rostime() - self.green_light_time > rospy.Duration(1) and traffic_light_status == "green" ): - rospy.loginfo(f"Light Status: {traffic_light_status}") + rospy.loginfo( + f"Driving through Intersection Light Status: {traffic_light_status}" + ) # Drive through intersection return py_trees.common.Status.SUCCESS else: diff --git a/code/planning/src/behavior_agent/behaviours/lane_change.py b/code/planning/src/behavior_agent/behaviours/lane_change.py index 49ce0d6f..b6c67eae 100755 --- a/code/planning/src/behavior_agent/behaviours/lane_change.py +++ b/code/planning/src/behavior_agent/behaviours/lane_change.py @@ -27,7 +27,7 @@ def __init__(self, name): :param name: name of the behaviour """ super(Approach, self).__init__(name) - rospy.loginfo("Approach started") + rospy.loginfo("Lane Change Approach started") def setup(self, timeout): """ @@ -72,7 +72,8 @@ def update(self): - Triggering, checking, monitoring. Anything...but do not block! - Set a feedback message - return a py_trees.common.Status.[RUNNING, SUCCESS, FAILURE] - Gets the current lane change distance. 
+ Calculates a virtual stop line and slows down while approaching unless + lane change is not blocked. :return: py_trees.common.Status.RUNNING, if too far from lane change py_trees.common.Status.SUCCESS, if stopped in front of lane change or entered the lane change @@ -91,15 +92,15 @@ def update(self): if self.change_distance != np.inf and self.change_detected: self.virtual_change_distance = self.change_distance - # ADD FEATURE: Check for Traffic + # TODO: ADD FEATURE Check for Traffic distance_lidar = 20 if distance_lidar is not None and distance_lidar > 15.0: - rospy.loginfo("Change is free not slowing down!") + rospy.loginfo("Lane Change is free not slowing down!") self.curr_behavior_pub.publish(bs.lc_app_free.name) self.blocked = False else: - rospy.loginfo("Change blocked slowing down") + rospy.loginfo("Lane Change blocked slowing down") self.blocked = True # get speed @@ -110,11 +111,14 @@ def update(self): if speedometer is not None: speed = speedometer.speed else: - rospy.logwarn("no speedometer connected") + rospy.logwarn("Lane Change: no speedometer connected") return py_trees.common.Status.RUNNING if self.virtual_change_distance > target_dis and self.blocked: # too far - rospy.loginfo("still approaching") + rospy.loginfo( + f"Lane Change: still approaching, " + f"distance:{self.virtual_change_distance}" + ) self.curr_behavior_pub.publish(bs.lc_app_blocked.name) return py_trees.common.Status.RUNNING elif ( @@ -123,7 +127,7 @@ def update(self): and self.blocked ): # stopped - rospy.loginfo("stopped") + rospy.loginfo("Lane Change: stopped at virtual stop line") return py_trees.common.Status.SUCCESS elif ( speed > convert_to_ms(5.0) @@ -191,7 +195,7 @@ def initialise(self): Any initialisation you need before putting your behaviour to work. This just prints a state status message. """ - rospy.loginfo("Wait Change") + rospy.loginfo("Lane Change Wait") return True def update(self): @@ -213,14 +217,14 @@ def update(self): if speedometer is not None: speed = speedometer.speed else: - rospy.logwarn("no speedometer connected") + rospy.logwarn("Lane change wait: no speedometer connected") return py_trees.common.Status.RUNNING if speed > convert_to_ms(10): - rospy.loginfo("Forward to enter") + rospy.loginfo("Lane change wait: Was not blocked, proceed to drive forward") return py_trees.common.Status.SUCCESS - # ADD FEATURE: Check for Traffic + # TODO: ADD FEATURE Check for Traffic distance_lidar = 20 change_clear = False @@ -231,11 +235,11 @@ def update(self): else: change_clear = True if not change_clear: - rospy.loginfo("Change blocked") + rospy.loginfo("Lane Change Wait: blocked") self.curr_behavior_pub.publish(bs.lc_wait.name) return py_trees.common.Status.RUNNING else: - rospy.loginfo("Change clear") + rospy.loginfo("Lane Change Wait: Change clear") return py_trees.common.Status.SUCCESS def terminate(self, new_status): @@ -256,9 +260,8 @@ def terminate(self, new_status): class Enter(py_trees.behaviour.Behaviour): """ - This behavior handles the driving through an intersection, it initially - sets a speed and finishes if the ego vehicle is close to the end of the - intersection. + This behavior inititates the lane change and waits until the + lane change is finished. """ def __init__(self, name): @@ -297,7 +300,7 @@ def initialise(self): This prints a state status message and changes the driving speed for the lane change. 
""" - rospy.loginfo("Enter next Lane") + rospy.loginfo("Lane Change: Enter next Lane") self.curr_behavior_pub.publish(bs.lc_enter_init.name) def update(self): @@ -321,7 +324,7 @@ def update(self): if next_waypoint_msg is None: return py_trees.common.Status.FAILURE if next_waypoint_msg.distance < 5: - rospy.loginfo("Drive on the next lane!") + rospy.loginfo("Lane Change Enter: Drive on the next lane!") return py_trees.common.Status.RUNNING else: return py_trees.common.Status.SUCCESS @@ -345,7 +348,7 @@ def terminate(self, new_status): class Leave(py_trees.behaviour.Behaviour): """ This behaviour defines the leaf of this subtree, if this behavior is - reached, the vehicle left the intersection. + reached, the vehicle has finished the lane change. """ def __init__(self, name): @@ -384,7 +387,7 @@ def initialise(self): This prints a state status message and changes the driving speed to the street speed limit. """ - rospy.loginfo("Leave Change") + rospy.loginfo("Lane Change Finished") self.curr_behavior_pub.publish(bs.lc_exit.name) return True diff --git a/code/planning/src/behavior_agent/behaviours/overtake.py b/code/planning/src/behavior_agent/behaviours/overtake.py index f90ec1ae..41ad24f4 100644 --- a/code/planning/src/behavior_agent/behaviours/overtake.py +++ b/code/planning/src/behavior_agent/behaviours/overtake.py @@ -75,8 +75,9 @@ def update(self): - Set a feedback message - return a py_trees.common.Status.[RUNNING, SUCCESS, FAILURE] - Gets the current distance to overtake, the current lane status and the - distance to collsion object. + Gets the current distance to overtake, the current oncoming lane status and the + distance to collsion object. Slows down while oncoming blocked until stopped + or oncoming clear. :return: py_trees.common.Status.RUNNING, if too far from overtaking py_trees.common.Status.SUCCESS, if stopped behind the blocking object or entered the process. 
@@ -110,7 +111,7 @@ def update(self): self.curr_behavior_pub.publish(bs.ot_app_free.name) return py_trees.common.Status.SUCCESS else: - rospy.loginfo("Overtake blocked slowing down") + rospy.loginfo("Overtake Approach: oncoming blocked slowing down") self.curr_behavior_pub.publish(bs.ot_app_blocked.name) # get speed @@ -123,11 +124,14 @@ def update(self): if self.ot_distance > 20.0: # too far - rospy.loginfo("still approaching") + rospy.loginfo( + f"Overtake Approach: still approaching obstacle, " + f"distance: {self.ot_distance}" + ) return py_trees.common.Status.RUNNING elif speed < convert_to_ms(2.0) and self.ot_distance < TARGET_DISTANCE_TO_STOP: # stopped - rospy.loginfo("stopped") + rospy.loginfo("Overtake Approach: stopped behind obstacle") return py_trees.common.Status.SUCCESS else: # still approaching @@ -216,7 +220,7 @@ def update(self): clear_distance = 30 obstacle_msg = self.blackboard.get("/paf/hero/collision") if obstacle_msg is None: - rospy.logerr("No OBSTACLE") + rospy.logerr("No OBSTACLE in overtake wait") return py_trees.common.Status.FAILURE data = self.blackboard.get("/paf/hero/oncoming") @@ -231,14 +235,16 @@ def update(self): self.curr_behavior_pub.publish(bs.ot_wait_free.name) return py_trees.common.Status.SUCCESS else: - rospy.loginfo(f"Overtake still blocked: {distance_oncoming}") + rospy.loginfo( + f"Overtake still blocked, distance to oncoming: {distance_oncoming}" + ) self.curr_behavior_pub.publish(bs.ot_wait_stopped.name) return py_trees.common.Status.RUNNING elif obstacle_msg.data[0] == np.inf: - rospy.loginf("No OBSTACLE") + rospy.loginfo("No OBSTACLE in overtake wait") return py_trees.common.Status.FAILURE else: - rospy.loginfo("No Lidar Distance") + rospy.loginfo("No Lidar Distance in overtake wait") return py_trees.common.Status.SUCCESS def terminate(self, new_status): @@ -312,7 +318,7 @@ def update(self): - Set a feedback message - return a py_trees.common.Status.[RUNNING, SUCCESS, FAILURE] - + Waits for motion_planner to finish the new trajectory. 
:return: py_trees.common.Status.RUNNING, py_trees.common.Status.SUCCESS, py_trees.common.Status.FAILURE, @@ -320,17 +326,17 @@ status = self.blackboard.get("/paf/hero/overtake_success") if status is not None: if status.data == 1: - rospy.loginfo("Overtake: Trajectory planned") + rospy.loginfo("Overtake Enter: Trajectory planned") return py_trees.common.Status.SUCCESS elif status.data == 0: self.curr_behavior_pub.publish(bs.ot_enter_slow.name) - rospy.loginfo("Overtake: Slowing down") + rospy.loginfo("Overtake Enter: Slowing down") return py_trees.common.Status.RUNNING else: - rospy.loginfo("OvertakeEnter: Abort") + rospy.loginfo("Overtake Enter: Abort") return py_trees.common.Status.FAILURE else: - rospy.loginfo("Overtake: Waiting for status update") + rospy.loginfo("Overtake Enter: Waiting for status update") return py_trees.common.Status.RUNNING def terminate(self, new_status): @@ -393,7 +399,7 @@ def initialise(self): self.curr_behavior_pub.publish(bs.ot_leave.name) data = self.blackboard.get("/paf/hero/current_pos") self.first_pos = np.array([data.pose.position.x, data.pose.position.y]) - rospy.loginfo(f"Leave Overtake: {self.first_pos}") + rospy.loginfo(f"Init Leave Overtake: {self.first_pos}") return True def update(self): @@ -412,7 +418,7 @@ def update(self): self.current_pos = np.array([data.pose.position.x, data.pose.position.y]) distance = np.linalg.norm(self.first_pos - self.current_pos) if distance > OVERTAKE_EXECUTING + NUM_WAYPOINTS: - rospy.loginfo(f"Left Overtake: {self.current_pos}") + rospy.loginfo(f"Overtake executed: {self.current_pos}") return py_trees.common.Status.FAILURE else: return py_trees.common.Status.RUNNING diff --git a/code/planning/srv/RequestBehaviourChange.srv b/code/planning/srv/RequestBehaviourChange.srv new file mode 100644 index 00000000..d4da3b97 --- /dev/null +++ b/code/planning/srv/RequestBehaviourChange.srv @@ -0,0 +1,3 @@ +uint8 request +--- +bool answer \ No newline at end of file diff --git a/code/requirements.txt b/code/requirements.txt index 5d30dcc8..34cddb77 100644 --- a/code/requirements.txt +++ b/code/requirements.txt @@ -13,7 +13,7 @@ scipy==1.10.1 xmltodict==0.13.0 py-trees==2.2.3 numpy==1.23.5 -ultralytics==8.1.11 +ultralytics==8.3.32 scikit-learn>=0.18 pandas==2.0.3 debugpy==1.8.7 diff --git a/doc/assets/overview.jpg b/doc/assets/overview.jpg index 3bdc9225..7c6b3c76 100644 Binary files a/doc/assets/overview.jpg and b/doc/assets/overview.jpg differ diff --git a/doc/assets/planning/behaviour_tree.PNG b/doc/assets/planning/behaviour_tree.PNG new file mode 100644 index 00000000..87ff5eb4 Binary files /dev/null and b/doc/assets/planning/behaviour_tree.PNG differ diff --git a/doc/assets/planning/behaviour_tree.drawio.xml b/doc/assets/planning/behaviour_tree.drawio.xml new file mode 100644 index 00000000..973300fc --- /dev/null +++ b/doc/assets/planning/behaviour_tree.drawio.xml @@ -0,0 +1,230 @@ +[230 lines of draw.io XML for the behaviour tree diagram; the markup was stripped during text extraction and is not reproduced here] diff --git a/doc/assets/research_assets/ACC_FLC_Example_1.PNG b/doc/assets/research_assets/ACC_FLC_Example_1.PNG new file mode 100644 index 00000000..e6382981 Binary files 
/dev/null and b/doc/assets/research_assets/ACC_FLC_Example_1.PNG differ diff --git a/doc/development/project_management.md b/doc/development/project_management.md index ef5e45d7..ec772185 100644 --- a/doc/development/project_management.md +++ b/doc/development/project_management.md @@ -50,4 +50,4 @@ Merge the pull request after: After merging, remember to delete the source branch to keep the repository clean. ->[!INFO] For more information about the review process, see [Review process](./review_guideline.md). +>[!TIP] For more information about the review process, see [Review process](./review_guideline.md). diff --git a/doc/development/review_guideline.md b/doc/development/review_guideline.md index 840adb48..64a502c9 100644 --- a/doc/development/review_guideline.md +++ b/doc/development/review_guideline.md @@ -95,7 +95,7 @@ If a comment of a review was resolved by either, a new commit or a discussion be If a new commit took place it is encouraged to comment the commit SHA to have a connection between comment and resolving commit ![img.png](../assets/Resolve_conversation.png) -> [!INFO] All conversations should be resolved before merging the pull request. +> [!TIP] All conversations should be resolved before merging the pull request. ## 5. Merging a Pull Request @@ -120,7 +120,7 @@ Long story short, the reviewer who approves the PR should merge. Only approve if Before merging a pull request, the request checks by the CI/CD pipeline should be successful. If the checks fail, the pull request should not be merged. -> [!INFO] An exception can be made for a PR that only addresses the documentation and the `driving` check is not yet completed. +> [!TIP] An exception can be made for a PR that only addresses the documentation and the `driving` check is not yet completed. ### 5.3. Deleting the branch diff --git a/doc/general/README.md b/doc/general/README.md index cbf937f4..fbde8778 100644 --- a/doc/general/README.md +++ b/doc/general/README.md @@ -4,4 +4,5 @@ This Folder contains instruction on installation, execution and architecture of 1. [Installation](./installation.md) 2. [Execution](./execution.md) -3. [Current architecture of the agent](./architecture.md) +3. [Current architecture of the agent](./architecture_current.md) +4. [Planned architecture of the agent](./architecture_planned.md) diff --git a/doc/general/architecture_current.md b/doc/general/architecture_current.md new file mode 100644 index 00000000..aa0a472a --- /dev/null +++ b/doc/general/architecture_current.md @@ -0,0 +1,371 @@ +# Current architecture of the vehicle agent + +**Summary:** This page gives an overview over the current general architecture of the vehicle agent. +The document contains an overview over all [nodes](#overview) and [topics](#topics). Additionally a visual thematic grouping of the subsystems of the agent is done for an easier optical identification which nodes and topics belong/contribute to them. 
+ +- [Overview](#overview) +- [Perception](#perception) + - [Vision Node (vision\_node.py)](#vision-node-vision_nodepy) + - [Traffic Light Detection (traffic\_light\_node.py)](#traffic-light-detection-traffic_light_nodepy) + - [Position Heading Node (position\_heading\_publisher\_node.py)](#position-heading-node-position_heading_publisher_nodepy) + - [Global distances (global\_plan\_distance\_publisher.py)](#global-distances-global_plan_distance_publisherpy) + - [Kalman filtering (kalman\_filter.py)](#kalman-filtering-kalman_filterpy) + - [Localization](#localization) +- [Planning](#planning) + - [PrePlaner (global\_planner.py)](#preplaner-global_plannerpy) + - [Behavior Agent (behavior\_agent)](#behavior-agent-behavior_agent) + - [Local Planning](#local-planning) +- [Acting](#acting) + - [MainFramePublisher](#mainframepublisher) + - [pure\_pursuit\_controller](#pure_pursuit_controller) + - [stanley\_controller](#stanley_controller) + - [vehicle\_controller](#vehicle_controller) + - [velocity\_controller](#velocity_controller) + +## Overview + +The vehicle agent is split into three major components: [Perception](#Perception), [Planning](#Planning) +and [Acting](#Acting). +A separate node is responsible for the [visualization](#Visualization). +The topics published by the Carla bridge can be +found [here](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_sensors/).\ +The messages necessary to control the vehicle via the Carla bridge can be +found [here](https://carla.readthedocs.io/en/0.9.8/ros_msgs/#CarlaEgoVehicleControlmsg). + +The miro-board can be found [here](https://miro.com/welcomeonboard/a1F0d1dya2FneWNtbVk4cTBDU1NiN3RiZUIxdGhHNzJBdk5aS3N4VmdBM0R5c2Z1VXZIUUN4SkkwNHpuWlk2ZXwzNDU4NzY0NTMwNjYwNzAyODIzfDI=?share_link_id=785020837509). +![Architecture overview](../assets/overview.jpg)*Connections between nodes visualized* + +![Department node overview](../assets/research_assets/node_path_ros.png)*In- and outgoing topics for every node of the departments* + +## Perception + +The perception is responsible for the efficient conversion of raw sensor and map data into a useful +environment representation that can be used by the [Planning](#Planning) for further processing. +It provides localization of the agent, filtering noise from sensor data, preconditions images for further usage in other nodes and detects certain points of interest (at the moment only traffic lights). + +Further information regarding the perception can be found [here](../perception/README.md). +Research for the perception can be found [here](../research/paf24/perception/). + +In the following, all subscribed and published topics and their corresponding nodes are listed with the format: + +- ```name of topic``` \(origin/destination node\) (typo of message) (link to ROS documentation of the message type) + +### Vision Node ([vision_node.py](/../paf/code/perception/src/vision_node.py)) + +Evaluates sensor data to detect and classify objects around the ego vehicle. +Other road users and objects blocking the vehicle's path are recognized (most of the time). +Recognized traffic lights get cut out from the image and made available for further processing. 
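
As a rough, non-authoritative sketch of how a downstream node might consume one of these topics, the snippet below subscribes to the Center segmented image and converts it with CvBridge. The node name and callback are placeholders and not part of the repository.

```python
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image as ImageMsg
from cv_bridge import CvBridge

bridge = CvBridge()


def on_segmented_image(msg):
    # Convert the ROS image into an OpenCV/numpy array for further processing.
    frame = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
    rospy.loginfo(f"Received segmented image of shape {frame.shape}")


if __name__ == "__main__":
    rospy.init_node("segmented_image_consumer")  # placeholder node name
    rospy.Subscriber(
        "/paf/hero/Center/segmented_image", ImageMsg, on_segmented_image, queue_size=1
    )
    rospy.spin()
```
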
+ +Subscriptions: + +- ```/paf/hero/Center/dist_array``` \(/lidar_distance\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +- ```/carla/hero/Center/image``` \(/carla_ros_bridge\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +- ```/carla/hero/Back/image``` \(/carla_ros_bridge\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +- ```/carla/hero/Left/image``` \(/carla_ros_bridge\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +- ```/carla/hero/Right/image``` \(/carla_ros_bridge\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) + +Publishes: + +- ```/paf/hero/Center/segmented_image``` \(/rviz\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +- ```/paf/hero/Back/segmented_image``` \(/rviz\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +- ```/paf/hero/Left/segmented_image``` \(/rviz\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +- ```/paf/hero/Right/segmented_image``` \(/rviz\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +- ```/paf/hero/Center/object_distance``` \(/CollisionCheck\) ([sensor_msgs/Float32MultiArray](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32MultiArray.html)) +- ```/paf/hero/Back/object_distance``` \(/CollisionCheck\) ([sensor_msgs/Float32MultiArray](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32MultiArray.html)) +- ```/paf/hero/Left/object_distance``` \(/CollisionCheck\) ([sensor_msgs/Float32MultiArray](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32MultiArray.html)) +- ```/paf/hero/Right/object_distance``` \(/CollisionCheck\) ([sensor_msgs/Float32MultiArray](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32MultiArray.html)) +- ```/paf/hero/Center/segmented_traffic_light``` \(/TrafficLightNode\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +- ```/paf/hero/Back/segmented_traffic_light``` \(/TrafficLightNode\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +- ```/paf/hero/Left/segmented_traffic_light``` \(/TrafficLightNode\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +- ```/paf/hero/Right/segmented_traffic_light``` \(/TrafficLightNode\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) + +### Traffic Light Detection ([traffic_light_node.py](/../paf/code/perception/src/traffic_light_node.py)) + +Recognizes traffic lights and what they are showing at the moment. +In particular traffic lights that are relevant for the correct traffic behavior of the ego vehicle, +are recognized early and reliably. 
+ +Subscriptions: + +- ```/paf/hero/Center/segmented_traffic_light``` \(/VisionNode\) ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) + +Publishes: + +- ```/paf/hero/Center/traffic_light_state``` \(/behavior_agent\) ([perception/TrafficLightState](/../paf/code/perception/msg/TrafficLightState.msg)) +- ```/paf/hero/Center/traffic_light_y_distance``` \(/behavior_agent\) ([std_msgs/Int16](https://docs.ros.org/en/api/std_msgs/html/msg/Int16.html)) + +### Position Heading Node ([position_heading_publisher_node.py](/../paf/code/perception/src/position_heading_publisher_node.py)) + +Calculates the current_pos (Location of the car) and current_heading (Orientation of the car around the Z- axis). + +Subscriptions: + +- ```/carla/hero/OpenDRIVE``` \(/carla_ros_bridge\) ([sensor_msgs/String](https://docs.ros.org/en/melodic/api/std_msgs/html/msg/String.html)) +- ```/carla/hero/IMU``` \(/carla_ros_bridge\) ([sensor_msgs/Imu](https://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/Imu.html)) +- ```/carla/hero/GPS``` \(/carla_ros_bridge\) ([sensor_msgs/NavSatFix](https://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/NavSatFix.html)) +- ```/paf/hero/kalman_pos``` \(/kalman_filter_node\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/paf/hero/kalman_heading``` \(/kalman_filter_node\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) + +Publishes: + +- ```/paf/hero/unfiltered_heading``` \(no subscriber at the moment\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/unfiltered_pos``` \(/kalman_filter_node\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/paf/hero/current_pos``` \(/pure_pursuit_controller, /stanley_controller, /MotionPlanning, /ACC, /MainFramePublisher, /GlobalPlanDistance, /position_heading_publisher_node, /curr_behavior \) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/paf/hero/current_heading``` \(/stanley_controller, /behavior_agent\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) + +### Global distances ([global_plan_distance_publisher.py](/../paf/code/perception/src/global_plan_distance_publisher.py)) + +Subscriptions: + +- ```/paf/hero/current_pos``` \(/position_heading_publisher_node\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/carla/hero/global_plan``` \(/ \) ([ros_carla_msgs/msg/CarlaRoute](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaRoute.msg)) + +Publishes: + +- ```/paf/hero/waypoint_distance``` \(/MotionPlanning, /behavior_agent\) ([geographic_msgs/WayPoint](https://docs.ros.org/en/melodic/api/geographic_msgs/html/msg/WayPoint.html)) +- ```/paf/hero/lane_change_distance``` \(/MotionPlanning, /behavior_agent\) ([perception/msg/LaneChange](/../paf/code/perception/msg/LaneChange.msg)) + +### Kalman filtering ([kalman_filter.py](/../paf/code/perception/src/kalman_filter.py)) + +Subscriptions: + +- ```/carla/hero/OpenDRIVE``` \(/carla_ros_bridge\) ([sensor_msgs/String](https://docs.ros.org/en/melodic/api/std_msgs/html/msg/String.html)) +- ```/carla/hero/IMU``` \(/carla_ros_bridge\) ([sensor_msgs/Imu](https://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/Imu.html)) +- ```/carla/hero/GPS``` \(/carla_ros_bridge\) 
([sensor_msgs/NavSatFix](https://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/NavSatFix.html)) +- ```/paf/hero/unfiltered_pos``` \(/position_heading_publisher_node\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/carla/hero/Speed``` \(/carla_ros_bridge\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) + +Publishes: + +- ```/paf/hero/kalman_pos``` \(/position_heading_publisher_node\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/paf/hero/kalman_heading``` \(/position_heading_publisher_node\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) + +### Localization + +Provides corrected accurate position, direction and speed of the ego vehicle. For this to be achived the + +- [Position Heading Node](#position-heading-node-position_heading_publisher_nodepy) +- [Kalman filtering](#kalman-filtering) and +- [Coordinate Transformation](/../paf/code/perception/src/coordinate_transformation.py) + +classes are used. + +## Planning + +The planning uses the data from the [Perception](#Perception) to find a path on which the ego vehicle can safely reach its destination. It also detects situations and reacts accordingly in traffic. It publishes signals such as a trajecotry or a target speed to acting. + +Further information regarding the planning can be found [here](../planning/README.md). +Research for the planning can be found [here](../research/planning/README.md). + +### PrePlaner ([global_planner.py](/../paf/code/planning/src/global_planner/global_planner.py)) + +Uses information from the map and the path specified by CARLA to find a first concrete path to the next intermediate point. More about it can be found [here](../planning/Global_Planner.md). + +Subscriptions: + +- ```/carla/hero/OpenDRIVE``` \(/carla_ros_bridge\) ([sensor_msgs/String](https://docs.ros.org/en/melodic/api/std_msgs/html/msg/String.html)) +- ```/carla/hero/global_plan``` \(/ \) ([ros_carla_msgs/msg/CarlaRoute](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaRoute.msg)) +- ```/paf/hero/current_pos``` \(/position_heading_publisher_node\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) + +Publishes: + +- ```/paf/hero/trajectory_global``` \(/ACC, /MotionPlanning\) ([nav_msgs/Path](https://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Path.html)) +- ```/paf/hero/speed_limits_OpenDrive``` \(/ACC\) ([sensor_msgs/Float32MultiArray](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32MultiArray.html)) + +### Behavior Agent ([behavior_agent](/../paf/code/planning/src/behavior_agent/)) + +Decides which speed is the right one to pass through a certain situation and +also checks if an overtake is necessary. +Everything is based on the data from the Perception [Perception](#Perception). 
More about the behavior tree can be found [here](../planning/Behavior_tree.md) + +Subscriptions: + +[topics2blackboard.py](/../paf/code/planning/src/behavior_agent/behaviours/topics2blackboard.py) + +- ```/carla/hero/Speed``` \(/carla_ros_bridge\) ([ros_carla_msgs/msg/CarlaSpeedometer](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaSpeedometer.msg)) +- ```/paf/hero/slowed_by_car_in_front``` \(/\) ([std_msg/Bool](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Bool.html)) +- ```/paf/hero/waypoint_distance``` \(/GlobalPlanDistance\) ([geographic_msgs/WayPoint](https://docs.ros.org/en/melodic/api/geographic_msgs/html/msg/WayPoint.html)) +- ```/paf/hero/stop_sign``` \(/\) ([mock/Stop_sign](/../paf/code/mock/msg/Stop_sign.msg)) +- ```/paf/hero/Center/traffic_light_state``` \(/TrafficLightNode\) ([perception/TrafficLightState](/../paf/code/perception/msg/TrafficLightState.msg)) +- ```/paf/hero/Center/traffic_light_y_distance``` \(/TrafficLightNode\) ([std_msgs/Int16](https://docs.ros.org/en/api/std_msgs/html/msg/Int16.html)) +- ```/paf/hero/max_velocity``` \(/behavior_agent\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/speed_limit``` \(/ACC\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/lane_change_distance``` \(/GlobalPlanDistance\) ([perception/msg/LaneChange](/../paf/code/perception/msg/LaneChange.msg)) +- ```/paf/hero/collision``` \(/CollisionCheck\) ([sensor_msgs/Float32MultiArray](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32MultiArray.html)) +- ```/paf/hero/current_pos``` \(/position_heading_publisher_node\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/paf/hero/current_heading``` \(/position_heading_publisher_node\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/overtake_success``` \(/MotionPlanning\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/oncoming``` \(/CollisionCheck\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/target_velocity``` \(/MotionPlanning\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) + +Publishes: + +[maneuvers.py](/../paf/code/planning/src/behavior_agent/behaviours/maneuvers.py) + +- ```/paf/hero/curr_behavior``` \(/MotionPlanning, /vehicle_controller\) ([std_msgs/String](https://docs.ros.org/en/api/std_msgs/html/msg/String.html)) +- ```/paf/hero/unstuck_distance``` \(/ACC, /MotionPlanning\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/unstuck_flag``` \(/ACC\) ([std_msg/Bool](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Bool.html)) + +[intersection.py](/../paf/code/planning/src/behavior_agent/behaviours/intersection.py) + +- ```/paf/hero/curr_behavior``` \(/MotionPlanning, /vehicle_controller\) ([std_msgs/String](https://docs.ros.org/en/api/std_msgs/html/msg/String.html)) + +[lane_change.py](/../paf/code/planning/src/behavior_agent/behaviours/lane_change.py) + +- ```/paf/hero/curr_behavior``` \(/MotionPlanning, /vehicle_controller\) ([std_msgs/String](https://docs.ros.org/en/api/std_msgs/html/msg/String.html)) + +[meta.py](/../paf/code/planning/src/behavior_agent/behaviours/meta.py) + +- ```/paf/hero/max_velocity``` \(/behavior_agent\) 
([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) + +[overtake.py](/../paf/code/planning/src/behavior_agent/behaviours/overtake.py) + +- ```/paf/hero/curr_behavior``` \(/MotionPlanning, /vehicle_controller\) ([std_msgs/String](https://docs.ros.org/en/api/std_msgs/html/msg/String.html)) + +### [Local Planning](../planning/Local_Planning.md) + +It consists of three components: + +- [Collision Check](../planning//Collision_Check.md): Checks for collisions based on objects recieved from [Perception](#perception) +- [ACC](../planning/ACC.md): Generates a new speed based on a possible collision recieved from Collision Check and speedlimits recieved from [Global Planner](#global-planning) +- [Motion Planning](../planning/motion_planning.md): Decides the target speed and modifies trajectory if signal recieved from [Behavior Agent](#behavior-agent-behavior_agent) + +Subscriptions: + +[ACC.py](/../paf/code/planning/src/local_planner/ACC.py) + +- ```/paf/hero/unstuck_flag``` \(/behavior_agent\) ([std_msg/Bool](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Bool.html)) +- ```/paf/hero/unstuck_distance``` \(/behavior_agent\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/carla/hero/Speed``` \(/carla_ros_bridge\) ([ros_carla_msgs/msg/CarlaSpeedometer](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaSpeedometer.msg)) +- ```/paf/hero/speed_limits_OpenDrive``` \(/PrePlanner\) ([sensor_msgs/Float32MultiArray](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32MultiArray.html)) +- ```/paf/hero/trajectory_global``` \(/PrePlanner\) ([nav_msgs/Path](https://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Path.html)) +- ```/paf/hero/current_pos``` \(/position_heading_publisher_node\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/paf/hero/collision``` \(/CollisionCheck\) ([sensor_msgs/Float32MultiArray](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32MultiArray.html)) + +[collision_check.py](/../paf/code/planning/src/local_planner/collision_check.py) + +- ```/carla/hero/Speed``` \(/carla_ros_bridge\) ([ros_carla_msgs/msg/CarlaSpeedometer](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaSpeedometer.msg)) +- ```/paf/hero/Center/object_distance``` \(/CollisionCheck\) ([sensor_msgs/Float32MultiArray](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32MultiArray.html)) + +[motion_planning.py](/../paf/code/planning/src/local_planner/motion_planning.py) + +- ```/paf/hero/spawn_car``` \(/\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/speed_limit``` \(/ACC\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/carla/hero/Speed``` \(/carla_ros_bridge\) ([ros_carla_msgs/msg/CarlaSpeedometer](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaSpeedometer.msg)) +- ```/paf/hero/current_heading``` \(/position_heading_publisher_node\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/trajectory_global``` \(/PrePlanner\) ([nav_msgs/Path](https://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Path.html)) +- ```/paf/hero/current_pos``` \(/position_heading_publisher_node\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- 
```/paf/hero/curr_behavior``` \(/behavior_agent\) ([std_msgs/String](https://docs.ros.org/en/api/std_msgs/html/msg/String.html)) +- ```/paf/hero/unchecked_emergency``` \(/\) ([std_msg/Bool](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Bool.html)) +- ```/paf/hero/acc_velocity``` \(/ACC\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/waypoint_distance``` \(/GlobalPlanDistance\) ([geographic_msgs/WayPoint](https://docs.ros.org/en/melodic/api/geographic_msgs/html/msg/WayPoint.html)) +- ```/paf/hero/lane_change_distance``` \(/GlobalPlanDistance\) ([perception/msg/LaneChange](/../paf/code/perception/msg/LaneChange.msg)) +- ```/paf/hero/collision``` \(/CollisionCheck\) ([sensor_msgs/Float32MultiArray](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32MultiArray.html)) +- ```/paf/hero/Center/traffic_light_y_distance``` \(/TrafficLightNode\) ([std_msgs/Int16](https://docs.ros.org/en/api/std_msgs/html/msg/Int16.html)) +- ```/paf/hero/unstuck_distance``` \(/behavior_agent\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/current_wp``` \(/ACC\) ([std_msgs/Float32](https://docs.ros.org/en/api/std_msgs/html/msg/Float32.html)) + +Publishes: + +[ACC.py](/../paf/code/planning/src/local_planner/ACC.py) + +- ```/paf/hero/acc_velocity``` \(/MotionPlanning\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/current_wp``` ([std_msgs/Float32](https://docs.ros.org/en/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/speed_limit``` \(/behavior_agent\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) + +[collision_check.py](/../paf/code/planning/src/local_planner/collision_check.py) + +- ```/paf/hero/emergency``` \(/vehicle_controller\) ([std_msg/Bool](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Bool.html)) +- ```/paf/hero/collision``` \(/ACC, /MotionPlanning\) ([sensor_msgs/Float32MultiArray](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32MultiArray.html)) +- ```/paf/hero/oncoming``` \(/behavior_agent\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) + +[motion_planning.py](/../paf/code/planning/src/local_planner/motion_planning.py) + +- ```/paf/hero/trajectory``` \(/pure_pursuit_controller\) ([nav_msgs/Path](https://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Path.html)) +- ```/paf/hero/target_velocity``` \(/behavior_agent\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/overtake_success``` \(/behavior_agent\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) + +## Acting + +The job of this component is to take the planned trajectory and target-velocities from the [Planning](#Planning) component and convert them into steering and throttle/brake controls for the CARLA-vehicle. + +All information regarding research done about acting can be found [here](../research/acting/README.md). + +Indepth information about the currently implemented acting Components can be found [here](../acting/README.md)! 
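
To make the trajectory-to-steering conversion more concrete, the following is a minimal textbook pure pursuit computation, not the controller implemented in pure_pursuit_controller.py; the wheelbase value and function signature are assumptions for illustration only.

```python
import numpy as np


def pure_pursuit_steer(current_pos, current_heading, lookahead_point, wheelbase=2.85):
    """Generic pure pursuit steering angle; all parameters are illustrative."""
    dx = lookahead_point[0] - current_pos[0]
    dy = lookahead_point[1] - current_pos[1]
    # Angle between the vehicle heading and the line to the look-ahead point.
    alpha = np.arctan2(dy, dx) - current_heading
    ld = np.hypot(dx, dy)  # look-ahead distance
    return np.arctan2(2.0 * wheelbase * np.sin(alpha), ld)
```
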
+ +### [MainFramePublisher](/../paf/code/acting/src/acting/MainFramePublisher.py) + +Subscription: + +- ```/paf/hero/current_pos``` \(/position_heading_publisher_node\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/paf/hero/current_heading``` \(/position_heading_publisher_node\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) + +### [pure_pursuit_controller](/../paf/code/acting/src/acting/pure_pursuit_controller.py) + +Calculates steering angles that keep the ego vehicle on the path given by +the [Local path planning](#Local-path-planning). + +Subscription: + +- ```/paf/hero/current_pos``` \(/position_heading_publisher_node\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/carla/hero/Speed``` \(/carla_ros_bridge\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/paf/hero/current_heading``` \(/position_heading_publisher_node\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) + +Publishes: + +- ```/paf/hero/pure_pursuit_steer``` \(/vehicle_controller\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/pure_p_debug``` \(/\) ([acting/msg/Debug](/../paf/code/acting/msg/Debug.msg)) + +### [stanley_controller](/../paf/code/acting/src/acting/stanley_controller.py) + +Calculates steering angles that keep the ego vehicle on the path given by +the [Local path planning](#Local-path-planning). + +Subscription: + +- ```/paf/hero/trajectory_global``` \(/PrePlanner\) ([nav_msgs/Path](https://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Path.html)) +- ```/paf/hero/current_pos``` \(/position_heading_publisher_node\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/carla/hero/Speed``` \(/carla_ros_bridge\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/paf/hero/current_heading``` \(/position_heading_publisher_node\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) + +Publishes: + +- ```/paf/hero/stanley_steer``` \(/vehicle_controller\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/stanley_debug``` \(/\) ([acting/msg/StanleyDebug](/../paf/code/acting/msg/StanleyDebug.msg)) + +### [vehicle_controller](/../paf/code/acting/src/acting/vehicle_controller.py) + +Subscription: + +- ```/paf/hero/curr_behavior``` \(/behavior_agent\) ([std_msgs/String](https://docs.ros.org/en/api/std_msgs/html/msg/String.html)) +- ```/paf/hero/emergency``` \(/vehicle_controller\) ([std_msg/Bool](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Bool.html)) +- ```/carla/hero/Speed``` \(/carla_ros_bridge\) ([geometry_msgs/PoseStamped](https://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/PoseStamped.html)) +- ```/paf/hero/throttle``` \(/velocity_controller\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/brake``` \(/velocity_controller\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/reverse``` \(/velocity_controller\) ([std_msg/Bool](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Bool.html)) +- ```/paf/hero/pure_pursuit_steer``` \(/pure_pursuit_controller\) 
([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/stanley_steer``` \(/pure_pursuit_controller\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) + +Publishes: + +- ```/carla/hero/vehicle_control_cmd``` \(/\) ([ros_carla_msgs/msg/CarlaEgoVehicleControl](https://github.com/carla-simulator/ros-carla-msgs/blob/master/msg/CarlaEgoVehicleControl.msg)) +- ```/carla/hero/status``` \(/\) ([std_msg/Bool](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Bool.html)) +- ```/paf/hero/controller``` \(/\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/emergency``` \(/vehicle_controller\) ([std_msg/Bool](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Bool.html)) + +### [velocity_controller](/../paf/code/acting/src/acting/velocity_controller.py) + +Calculates acceleration values to drive the target-velocity given by the [Local path planning](#Local-path-planning). + +For further indepth information about the currently implemented Vehicle Controller click [here](../acting/vehicle_controller.md) + +Subscription: + +- ```/paf/hero/target_velocity``` \(/MotionPlanning\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/carla/hero/Speed``` \(/carla_ros_bridge\) ([ros_carla_msgs/msg/CarlaSpeedometer](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaSpeedometer.msg)) + +Publishes: + +- ```/paf/hero/throttle``` \(/vehicle_controller\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/brake``` \(/vehicle_controller\) ([std_msgs/Float32](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32.html)) +- ```/paf/hero/reverse``` \(/vehicle_controller\) ([std_msg/Bool](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Bool.html)) diff --git a/doc/general/architecture.md b/doc/general/architecture_planned.md similarity index 86% rename from doc/general/architecture.md rename to doc/general/architecture_planned.md index f8eaf839..1d6fd803 100644 --- a/doc/general/architecture.md +++ b/doc/general/architecture_planned.md @@ -3,24 +3,25 @@ **Summary:** This page gives an overview over the planned general architecture of the vehicle agent. The document contains an overview over all [nodes](#overview) and [topics](#topics). 
-- [Planned architecture of vehicle agent](#planned-architecture-of-vehicle-agent) - - [Overview](#overview) - - [Perception](#perception) - - [Obstacle Detection and Classification](#obstacle-detection-and-classification) - - [Traffic Light Detection](#traffic-light-detection) - - [Localization](#localization) - - [Planning](#planning) - - [Global Planning](#global-planning) - - [Decision Making](#decision-making) - - [Local Planning](#local-planning) - - [Collision Check](#collision-check) - - [ACC](#acc) - - [Motion Planning](#motion-planning) - - [Acting](#acting) - - [Path following with Steering Controllers](#path-following-with-steering-controllers) - - [Velocity control](#velocity-control) - - [Vehicle controller](#vehicle-controller) - - [Visualization](#visualization) +- [Overview](#overview) +- [Perception](#perception) + - [Vision Node](#vision-node) + - [Traffic Light Detection](#traffic-light-detection) + - [Position Heading Node](#position-heading-node) + - [Distance to Objects](#distance-to-objects) + - [Localization](#localization) +- [Planning](#planning) + - [Global Planning](#global-planning) + - [Decision Making](#decision-making) + - [Local Planning](#local-planning) + - [Collision Check](#collision-check) + - [ACC](#acc) + - [Motion Planning](#motion-planning) +- [Acting](#acting) + - [Path following with Steering Controllers](#path-following-with-steering-controllers) + - [Velocity control](#velocity-control) + - [Vehicle controller](#vehicle-controller) +- [Visualization](#visualization) ## Overview @@ -28,12 +29,13 @@ The vehicle agent is split into three major components: [Perception](#Perception and [Acting](#Acting). A separate node is responsible for the [visualization](#Visualization). The topics published by the Carla bridge can be -found [here](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_sensors/). -The msgs necessary to control the vehicle via the Carla bridge can be -found [here](https://carla.readthedocs.io/en/0.9.8/ros_msgs/#CarlaEgoVehicleControlmsg) +found [here](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_sensors/).\ +The messages necessary to control the vehicle via the Carla bridge can be +found [here](https://carla.readthedocs.io/en/0.9.8/ros_msgs/#CarlaEgoVehicleControlmsg).\ -![Architecture overview](../assets/overview.jpg) The miro-board can be found [here](https://miro.com/welcomeonboard/a1F0d1dya2FneWNtbVk4cTBDU1NiN3RiZUIxdGhHNzJBdk5aS3N4VmdBM0R5c2Z1VXZIUUN4SkkwNHpuWlk2ZXwzNDU4NzY0NTMwNjYwNzAyODIzfDI=?share_link_id=785020837509). +![Architecture overview](../assets/overview.jpg "Connections between nodes visualized") +![Department node overview](../assets/research_assets/node_path_ros.png "In- and outgoing topics for every node of the departments") ## Perception @@ -43,7 +45,7 @@ environment representation that can be used by the [Planning](#Planning) for fur Further information regarding the perception can be found [here](../perception/README.md). Research for the perception can be found [here](../research/perception/README.md). -### Obstacle Detection and Classification +### Vision Node Evaluates sensor data to detect and classify objects around the ego vehicle. Other road users and objects blocking the vehicle's path are recognized. 
@@ -52,17 +54,28 @@ In the case of dynamic objects, an attempt is made to recognize the direction an Subscriptions: -- ```radar``` ([sensor_msgs/PointCloud2](https://docs.ros.org/en/api/sensor_msgs/html/msg/PointCloud2.html)) - ```lidar``` ([sensor_msgs/PointCloud2](https://docs.ros.org/en/api/sensor_msgs/html/msg/PointCloud2.html)) - ```rgb_camera``` ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) - ````gnss```` ([sensor_msgs/NavSatFix](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_sensors/)) - ```map``` ([std_msgs/String](https://docs.ros.org/en/api/std_msgs/html/msg/String.html)) +- ```radar``` ([sensor_msgs/PointCloud2](https://docs.ros.org/en/api/sensor_msgs/html/msg/PointCloud2.html)) SOON TO COME Publishes: - ```obstacles``` (Custom msg: obstacle ([vision_msgs/Detection3DArray Message](http://docs.ros.org/en/api/vision_msgs/html/msg/Detection3DArray.html)) and its classification ([std_msgs/String Message](http://docs.ros.org/en/noetic/api/std_msgs/html/msg/String.html))) +- ```segmented_image``` ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) + - /paf/hero/Center/segmented_image + - /paf/hero/Back/segmented_image + - /paf/hero/Left/segmented_image + - /paf/hero/Right/segmented_image +- ```segmented_traffic_light``` ([sensor_msgs/Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +- ```object_distance``` ([sensor_msgs/Float32MultiArray](https://docs.ros.org/en/noetic/api/std_msgs/html/msg/Float32MultiArray.html)) + - /paf/hero/Center/object_distance + - /paf/hero/Back/object_distance + - /paf/hero/Left/object_distance + - /paf/hero/Right/object_distance ### Traffic Light Detection @@ -84,6 +97,14 @@ Publishes: position ([geometry_msgs/Pose Message](http://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/Pose.html)), distance_to_stop_line ([std_msgs/Float64 Message](http://docs.ros.org/en/api/std_msgs/html/msg/Float64.html))) +### Position Heading Node + +There are currently no planned improvements of the Position Heading Node. + +### Distance to Objects + +There are currently no planned improvements for Distance to Objects. + ### Localization Provides corrected accurate position, direction and speed of the ego vehicle diff --git a/doc/planning/Behavior_tree.md b/doc/planning/Behavior_tree.md index aae559df..bf78edb9 100644 --- a/doc/planning/Behavior_tree.md +++ b/doc/planning/Behavior_tree.md @@ -36,9 +36,9 @@ For visualization at runtime you might want to also install this [rqt-Plugin](ht ## Our behaviour tree The following section describes the behaviour tree we use for normal driving using all functionality provided by the agent. In the actual implementation this is part of a bigger tree, that handles things like writing topics to the blackboard, starting and finishing the decision tree. -The following tree is a simplification. +The following tree is a simplification. The draw.io xml file to update this diagram can be found inside /assets/planning/. -![Simple Tree](../assets/planning/simple_final_tree.png) +![Simple Tree](../assets/planning/behaviour_tree.PNG) ### Behavior diff --git a/doc/planning/README.md b/doc/planning/README.md index aff116ab..2fc268a0 100644 --- a/doc/planning/README.md +++ b/doc/planning/README.md @@ -25,7 +25,7 @@ The decision making collects most of the available information of the other comp the information. All possible traffic scenarios are covered in this component. 
The decision making uses a so called decision tree, which is easy to adapt and to expand. -![Simple Tree](../assets/planning/simple_final_tree.png) +![Simple Tree](../assets/planning/behaviour_tree.PNG) ### [Local Planning](./Local_Planning.md) diff --git a/doc/planning/RequestBehaviourChangeService.md b/doc/planning/RequestBehaviourChangeService.md new file mode 100644 index 00000000..fe0a3751 --- /dev/null +++ b/doc/planning/RequestBehaviourChangeService.md @@ -0,0 +1,26 @@ +# Request Behaviour Change Service + +This service is hosted in the node RequestBehaviourChangeService. + +Calling it requires a behaviour ID integer (based on the corresponding enum) and returns a bool depending on whether the request is granted. + +To use it, import RequestBehaviourChange from planning.srv. + +To call it, create a callable instance with: + +```python +rospy.wait_for_service('RequestBehaviourChange') + +name = rospy.ServiceProxy('RequestBehaviourChange', RequestBehaviourChange) +``` + +Then, you can just use this instance in a try setup: + +```python +try: + response = name(input) +except rospy.ServiceException as e: + # handle exception +``` + +For communication with the behaviour tree this node publishes a granted request to a topic behaviour_request. diff --git a/doc/planning/behaviours/Intersection.md b/doc/planning/behaviours/Intersection.md new file mode 100644 index 00000000..7d17dd90 --- /dev/null +++ b/doc/planning/behaviours/Intersection.md @@ -0,0 +1,47 @@ +# Intersection Behavior + +**Summary:** This file explains the Intersection behavior. + +- [Intersection Behavior](#intersection-behavior) + - [General](#general) + - [Intersection Ahead](#intersection-ahead) + - [Approach](#approach) + - [Wait](#wait) + - [Enter](#enter) + - [Leave](#leave) + +## General + +The Intersection behaviour is used to control the vehicle when it encounters a intersection. It handles stop signs as well as traffic lights. +The Intersection sequence consists of the sub-behaviours "Approach", "Wait", "Enter" and "Leave". + +To enter the Intersection sequence "Intersection ahead" must firstly be successful. + +## Intersection ahead + +Successful when there is a stop line within a set distance. + +## Approach + +Handles approaching the intersection by slowing down the vehicle. Returns RUNNING while still far from the intersection, SUCCESS when the vehicle has stopped or has already entered the intersection and FAILURE when the path is faulty. + +Calculates a virtual stopline based on whether a stopline or only a stop sign has been detected and publishes a distance to it. While the vehicle is not stopped at the virtual stopline nor has entered the intersection, int_app_to_stop is published as the current behaviour. +This is used inside motion_planning to calculate a stopping velocity. + +A green light is approached with 30 km/h. + +## Wait + +Handles wating at the stop line until the vehicle is allowed to drive. + +If the light is green or when there isn't a traffic light returns SUCCESS otherwise RUNNING. + +## Enter + +Handles driving through the intersection. Uses 50 km/h as target speed. + +Returns SUCCESS once the next waypoint is not this intersection anymore. + +## Leave + +Signifies that the vehicle has left the intersection and simply returns FAILURE to leave the Intersection sequence. 
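
The sub-behaviours above all follow the same py_trees pattern of returning RUNNING, SUCCESS or FAILURE from `update()`. The skeleton below only illustrates that pattern with made-up thresholds; the actual implementation, including blackboard access and publishing the current behaviour, lives in intersection.py.

```python
import py_trees


class ApproachSketch(py_trees.behaviour.Behaviour):
    """Illustrative skeleton of the Approach pattern, not the real behaviour."""

    def __init__(self, name="ApproachSketch"):
        super().__init__(name)
        self.stopline_distance = float("inf")  # would come from the blackboard
        self.speed = 0.0                       # would come from the speedometer topic

    def update(self):
        if self.stopline_distance > 5.0:
            # Still far from the virtual stop line: keep approaching.
            return py_trees.common.Status.RUNNING
        if self.speed < 0.5:
            # Stopped at the virtual stop line.
            return py_trees.common.Status.SUCCESS
        return py_trees.common.Status.RUNNING
```
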
diff --git a/doc/planning/behaviours/LaneChange.md b/doc/planning/behaviours/LaneChange.md new file mode 100644 index 00000000..341b40a4 --- /dev/null +++ b/doc/planning/behaviours/LaneChange.md @@ -0,0 +1,39 @@ +# Lane Change Behavior + +**Summary:** This file explains the Lane Change behavior. + +- [Lane Change Behavior](#lanechange-behavior) + - [General](#general) + - [Lane Change Ahead](#lanechange-ahead) + - [Approach](#approach) + - [Wait](#wait) + - [Enter](#enter) + - [Leave](#leave) + +## General + +This behaviour executes a lane change. It slows the vehicle down until the lane change point is reached and then proceeds to switch lanes. + +## Lane Change Ahead + +Checks whether the next waypoint is a lane change and initiates the lane change sequence accordingly. + +## Approach + +Calculates a virtual stop line at the lane change point and publishes lc_app_blocked for the motion planner to slow down while too far away. + +If the lane change is not blocked (currently not implemented), the car does not slow down (30 km/h). + +Once the car is within a set distance of the virtual stop line and not blocked, it returns SUCCESS. SUCCESS is also returned when the car stops at the stop line. + +## Wait + +Waits at the lane change point until the lane change is no longer blocked (not implemented). + +## Enter + +Initiates the lane change with 20 km/h and continues driving on the next lane until the lane change waypoint is far enough away. + +## Leave + +Simply exits the behaviour. diff --git a/doc/planning/behaviours/LeaveParkingSpace.md b/doc/planning/behaviours/LeaveParkingSpace.md new file mode 100644 index 00000000..51192638 --- /dev/null +++ b/doc/planning/behaviours/LeaveParkingSpace.md @@ -0,0 +1,13 @@ +# Leave Parking Space Behavior + +**Summary:** This file explains the Leave Parking Space behavior. + +- [Leave Parking Space Behavior](#leaveparkingspace-behavior) + - [General](#general) + +## General + +The leave parking space behaviour is only executed at the beginning of the simulation to leave the parking space. + +The behaviour calculates the Euclidean distance between the starting position and the current position to determine whether the car has fully left the parking space. If that is the case, a "called" +flag is set to true so that this behaviour is never executed again and FAILURE is returned to end the behaviour. Otherwise it stays in RUNNING. diff --git a/doc/planning/behaviours/Overtake.md b/doc/planning/behaviours/Overtake.md new file mode 100644 index 00000000..3058995a --- /dev/null +++ b/doc/planning/behaviours/Overtake.md @@ -0,0 +1,44 @@ +# Overtake Behavior + +**Summary:** This file explains the Overtake behavior. + +- [General](#general) +- [Overtake ahead](#overtake-ahead) +- [Approach](#approach) +- [Wait](#wait) +- [Enter](#enter) +- [Leave](#leave) + +## General + +This behaviour is used to overtake an object in close proximity. This behaviour is currently not working and is more of an initial prototype. + +## Overtake ahead + +Checks whether there is an object in front of the car that needs overtaking. + +Estimates whether the car would collide with the object soon. If that is the case, a counter gets incremented. When that counter reaches 4, SUCCESS is returned. If the object is not blocking the trajectory, FAILURE is returned. + +## Approach + +This is running while the obstacle is still in front of the car. 
+ +Checks whether the oncoming traffic is far away or clear, if that is the case then ot_app_free is published as the current behaviour for the motion_planner and returns SUCCESS. Otherwise ot_app_blocked is published for the car to slow down. + +If the car stops behind the obstacle SUCCESS is also returned. + +## Wait + +This handles wating for clear oncoming traffic if the car has stopped behind the obstacle. If the overtake is clear ot_wait_free gets published and SUCCESS is returned. Otherwise ot_wait_stopped gets published and the behaviour stays in RUNNING. + +If the obstacle in front is gone the behaviour is aborted with FAILURE. + +## Enter + +Handles switching the lane for overtaking. + +Waits for motion planner to finish the trajectory changes and for it to set the overtake_success flag. + +## Leave + +Runs until the overtake is fully finished and then leaves the behaviour. diff --git a/doc/planning/Unstuck_Behavior.md b/doc/planning/behaviours/Unstuck.md similarity index 100% rename from doc/planning/Unstuck_Behavior.md rename to doc/planning/behaviours/Unstuck.md diff --git a/doc/research/paf24/planning/ACC.md b/doc/research/paf24/planning/ACC.md new file mode 100644 index 00000000..366a02c5 --- /dev/null +++ b/doc/research/paf24/planning/ACC.md @@ -0,0 +1,115 @@ +# ACC + +This file gives a general overview on possible ACC implementations and reflects the current state of the ACC. + +## General + +The main goal of an ACC (Adaptive Cruise Control) is to follow a car driving in front while keeping a safe distance to it. This can be achieved by adjusting the speed to the speed of the car in front. +In general, classic ACC systems are designed for higher velocity (e.g. > 40 km/h). Apart from that, there are Stop & Go systems that support lower velocites (e.g. < 40 km/h). In our case, both systems might be needed and it might be reasonable to develop two different systems for ACC and Stop & Go. +The threshold to distinguish between the two systems has to be chosen reasonably. +There are basically three different techniques that can be used to implement an ACC: PID Control, Model Predictive Control and Fuzzy Logic Control. Another option is CACC (Cooperative Adaptive Cruise Control) but this is not relevant for our project since it requires communication between the vehicles. + +### PID Control + +The PID Controller consists of three terms: the proportional term, the integral term and the derivative term. One possible simple controller model looks as follows: + +$$ v_f(t) = v_f(t - t_s) + k_p e(t-t_s) + k_i \int_{0}^{t} e(\tau) d\tau + k_d \dot{e}(t - t_s) $$ +$$ e(t-t_s) = \Delta x(t - t_s) - t_{hw,d} v_f (t - t_s) $$ + +- $v_f$: follower vehicle velocity (transmitted to the acting component) +- $t_s$: sampling time +- $k_p$ and $k_i$ and $k_d$: coefficients for proportional, integral and derivative terms +- $e$: distance error (difference between actual distance $\Delta x$ and desired distance $\Delta x_d$) +- $t_{hw,d}$: desired time headway (duration between the arrival of the first car at a certain waypoint and the arrival of the following car at the same waypoint) + +### Model Predictive Control (MPC) + +It calculates the current control action by solving an online, iterative and finite-horizon optimization of the model. +Procedure: + +1. Prediction of future system states based on current states +2. Computation of the cost function for a finite time horizon in the future +3. Implementation of the first step of the solved control sequence +4. 
+5. Sampling of new current states and repetition of the process
+
+### Fuzzy Logic Control (FLC)
+
+Provides a unified control framework that offers both functions: ACC and Stop & Go.
+
+![Example of an FLC control algorithm showing the input variables (relative distance, host vehicle speed), fuzzy rule processing, and the output variable](../../../assets/research_assets/ACC_FLC_Example_1.PNG)
+
+## ACC in our project
+
+### Current implementation
+
+- General behaviour:
+  - checks whether the obstacle distance is smaller than the safety distance
+  - if so, calculates a new speed based on the obstacle distance
+  - otherwise keeps the current speed
+- publishes the speed to acc_velocity
+- the safety distance calculation is currently not correct; it uses speed + (speed * 0.36)², which results in wrong distances
+- if the car in front of us ignores speed limits, we ignore them as well
+- some parts of the unstuck routine are in the ACC and need to be refactored
+- the same goes for publishing the current waypoint; this should not be in the ACC
+
+In summary, the current implementation is not sufficient and needs major refactoring.
+
+### Concept for new implementation
+
+The new concept for the ACC is to take the trajectory, look at all or a limited subset of the next points, and add a target velocity and the current behaviour to each point.
+This way the Acting component has more knowledge about what the car is doing and can adjust accordingly in a local manner.
+For this, a new trajectory message type was implemented in #511.
+
+![trajectoryMsg](https://github.com/user-attachments/assets/0b452f1a-4c60-45b2-882f-3a50118c9cb9)
+
+Since the behaviour is passed as an ID, a new enum for behaviours was implemented in utils.py as well.
+
+The general idea for speeds above the 40 km/h mark is to calculate a proper safety distance, a general target velocity and PID-based velocity targets. For speeds below that mark, a Stop & Go system can be discussed if it is really needed.
+
+For the safety distance we would like to calculate it as in the FLC approach, but that most likely needs some adjustments, as setting the distance to 100 m when there is no vehicle ahead seems unreasonable.
+We can simply set the velocity to the speed limit in that case.
+
+For the general speed target we take either the speed of the car in front or the speed limit, whichever is lower. In cases where the car in front is substantially slower than the speed limit, the ACC could initiate overtaking.
+
+Since we want to calculate the desired speed at each point of the trajectory, the way PID calculates velocity seems reasonable, as we can treat the trajectory points as different points in time separated by the sampling time.
+For example, say we sample every fifth point and calculate the velocity for it; we can then simply interpolate all points in between.
+
+$v_f(t - t_s)$ would then simply be the velocity of the fifth point before the current one. Theoretically this also allows us to adjust the sampling time dynamically if needed.
+
+For the distance error we can use the safety distance as the desired distance. The distance at time $t$ needs to be predicted from the (predicted) distance at the prior sample point and the speed calculated for that point.
+
+We calculate velocities like this up to the point where the predicted distance is within 5 % of the optimal safety distance. For points beyond that we simply use the desired general speed.
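+
+As a rough illustration of this sampling idea (not part of the current code), the following Python sketch combines the headway-based safety distance with the PID update from the formula above. All names and values here (plan_velocities, the gains k_p/k_i/k_d, t_headway, and the handling of the 5 % threshold) are assumptions chosen for the example, not an existing implementation.
+
+```python
+def plan_velocities(ego_speed, lead_speed, lead_distance,
+                    general_target_speed, num_samples, sample_time,
+                    k_p=0.5, k_i=0.0, k_d=0.1, t_headway=2.0):
+    """Sketch: one target velocity per sampled trajectory point.
+
+    general_target_speed is assumed to be min(lead speed, speed limit),
+    as described in the concept above.
+    """
+    velocities = []
+    v_prev = ego_speed        # v_f(t - t_s) at the first sample
+    distance = lead_distance  # predicted gap to the car in front
+    integral = 0.0
+    prev_error = distance - t_headway * v_prev
+
+    for _ in range(num_samples):
+        desired_gap = t_headway * v_prev  # headway-based safety distance
+        error = distance - desired_gap    # e(t - t_s)
+
+        if abs(error) <= 0.05 * desired_gap:
+            # Gap has converged to within 5 % of the safety distance:
+            # simply hold the general target speed from here on.
+            v_next = general_target_speed
+        else:
+            integral += error * sample_time
+            derivative = (error - prev_error) / sample_time
+            # v_f(t) = v_f(t - t_s) + k_p*e + k_i*integral(e) + k_d*de/dt
+            v_next = v_prev + k_p * error + k_i * integral + k_d * derivative
+            v_next = min(max(v_next, 0.0), general_target_speed)
+            prev_error = error
+
+        velocities.append(v_next)
+        # Predict the gap at the next sample from the relative speed.
+        distance += (lead_speed - v_next) * sample_time
+        v_prev = v_next
+
+    return velocities
+```
+
+In the actual ACC these per-sample velocities would then be attached to the corresponding trajectory points, with the points in between interpolated, and published via the new trajectory message type.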
+
+![accDiagram](https://github.com/user-attachments/assets/9a7b4572-f041-4da0-900c-51ab20d1904b)
+
+### Possible next steps
+
+A separate file for the new ACC should be created so that the existing system is not disturbed.
+
+The parts that might get cut from the ACC, such as publishing the current waypoint and the unstuck routine, need to be evaluated for necessity and, if necessary, moved to a more fitting component.
+
+Implement a publisher for the new message type.
+
+Start implementing the safety distance and general target speed logic. The subscriber logic could be taken from the old implementation.
+
+Implement the PID logic.
+
+### Requirements
+
+- obstacle speed
+- obstacle distance
+
+## Discussion
+
+- How can adaptations of the ACC be tested? (Suggestion: Create test scenarios that represent different situations, such as driving straight behind a leading vehicle, no leading vehicle, sudden braking, etc.)
+- Which output should be transferred to the Acting component? (Suggestion: The desired speed is published in the new trajectory. No acceleration data is needed; the acceleration is handled by the Acting component.)
+- Which input do we get from the Perception component? (Suggestion: The distance and velocity of the car in front would be very helpful.)
+
+## Sources
+
+He, Yinglong et al. (2019). Adaptive Cruise Control Strategies Implemented on Experimental Vehicles: A Review. IFAC-PapersOnLine, 52, 21-27. doi:10.1016/j.ifacol.2019.09.004.
+[Link](https://www.researchgate.net/publication/335934496_Adaptive_Cruise_Control_Strategies_Implemented_on_Experimental_Vehicles_A_Review)