diff --git a/code/agent/config/rviz_config.rviz b/code/agent/config/rviz_config.rviz
index 3f32f23c..2e419cc2 100644
--- a/code/agent/config/rviz_config.rviz
+++ b/code/agent/config/rviz_config.rviz
@@ -69,6 +69,23 @@ Visualization Manager:
         PointCloud2: false
         Value: true
       Zoom Factor: 1
+    - Class: rviz/Image
+      Enabled: true
+      Image Rendering: background and overlay
+      Image Topic: /paf/hero/Center/segmented_image
+      Name: VisonNode Output
+      Overlay Alpha: 0.5
+      Queue Size: 2
+      Transport Hint: raw
+      Unreliable: false
+      Value: true
+      Visibility:
+        Grid: true
+        Imu: true
+        Path: true
+        PointCloud2: true
+        Value: true
+      Zoom Factor: 1
     - Alpha: 1
       Class: rviz_plugin_tutorials/Imu
       Color: 204; 51; 204
diff --git a/code/perception/src/vision_node.py b/code/perception/src/vision_node.py
index 519a968f..4897391c 100755
--- a/code/perception/src/vision_node.py
+++ b/code/perception/src/vision_node.py
@@ -19,7 +19,7 @@ from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
 import numpy as np
 from ultralytics import NAS, YOLO, RTDETR, SAM, FastSAM

-# import rospy
+import rospy

 """
 VisionNode:
@@ -214,7 +214,7 @@ def handle_camera_image(self, image):
         vision_result = self.predict_ultralytics(image)

         # publish image to rviz
-        """img_msg = self.bridge.cv2_to_imgmsg(vision_result,
+        img_msg = self.bridge.cv2_to_imgmsg(vision_result,
                                             encoding="rgb8")
         img_msg.header = image.header
         side = rospy.resolve_name(img_msg.header.frame_id).split('/')[2]
@@ -225,9 +225,9 @@ def handle_camera_image(self, image):
         if side == "Left":
             self.publisher_left.publish(img_msg)
         if side == "Right":
-            self.publisher_right.publish(img_msg)"""
+            self.publisher_right.publish(img_msg)

-        locals().clear()
+        # locals().clear()
         # print(f"Published Image on Side: {side}")
         pass

@@ -255,7 +255,7 @@ def handle_dist_array(self, dist_array):
                                                desired_encoding='passthrough')
         # print("RECEIVED DIST")
         self.dist_arrays = dist_array
-        locals().clear()
+        # locals().clear()

     def predict_torch(self, image):
         self.model.eval()
@@ -332,10 +332,10 @@ def predict_ultralytics(self, image):
                     # obj_dist4 = distances_copy[x4][y4].copy()
                     # print(obj_dist1, obj_dist3)

-                    """abs_distance = np.sqrt(
+                    abs_distance = np.sqrt(
                         obj_dist1[0]**2 +
                         obj_dist1[1]**2 +
-                        obj_dist1[2]**2)"""
+                        obj_dist1[2]**2)

                     # create 2d glass plane at object
                     # with box dimension
@@ -379,24 +379,23 @@ def predict_ultralytics(self, image):
                             distance_output.append(float(np.inf))"""

                     c_boxes.append(torch.tensor(pixels))
-                    # c_labels.append(f"Class: {cls},"
-                    #                 f"Meters: {round(abs_distance, 2)},"
-                    #                 f"({round(float(obj_dist1[0]), 2)},"
-                    #                 f"{round(float(obj_dist1[1]), 2)},"
-                    #                 f"{round(float(obj_dist1[2]), 2)})")
+                    c_labels.append(f"Class: {cls},"
+                                    f"Meters: {round(abs_distance, 2)},"
+                                    f"({round(float(obj_dist1[0]), 2)},"
+                                    f"{round(float(obj_dist3[1]), 2)})")

         # print("DISTANCE_ARRAY: ", distance_output)
         self.distance_publisher.publish(
             Float32MultiArray(data=distance_output))

-        """transposed_image = np.transpose(cv_image, (2, 0, 1))
+        transposed_image = np.transpose(cv_image, (2, 0, 1))
         image_np_with_detections = torch.tensor(transposed_image,
-                                                dtype=torch.uint8)"""
+                                                dtype=torch.uint8)

         if 9 in output[0].boxes.cls:
             self.process_traffic_lights(output[0], cv_image, image.header)

-        """c_boxes = torch.stack(c_boxes)
+        c_boxes = torch.stack(c_boxes)
         box = draw_bounding_boxes(image_np_with_detections,
                                   c_boxes,
                                   c_labels,
@@ -405,9 +404,9 @@ def predict_ultralytics(self, image):
                                   font_size=12)
         np_box_img = np.transpose(box.detach().numpy(),
                                   (1, 2, 0))
-        box_img = cv2.cvtColor(np_box_img,
-                               cv2.COLOR_BGR2RGB)"""
-        locals().clear()
-        # return box_img
+        box_img = cv2.cvtColor(np_box_img, cv2.COLOR_BGR2RGB)
+        # locals().clear()
+        return box_img

         # return output[0].plot()
diff --git a/code/planning/src/local_planner/collision_check.py b/code/planning/src/local_planner/collision_check.py
index a1d896d8..b7039f8d 100755
--- a/code/planning/src/local_planner/collision_check.py
+++ b/code/planning/src/local_planner/collision_check.py
@@ -183,7 +183,7 @@ def calculate_rule_of_thumb(emergency, speed):
         Returns:
             float: distance calculated with rule of thumb
         """
-        reaction_distance = 0
+        reaction_distance = speed * 0.36
         braking_distance = (speed * 0.36)**2
         if emergency:
             return reaction_distance + braking_distance / 2