feat: Reactivate Segmentation Image Rviz for debugging
samuelkuehnel committed Mar 10, 2024
1 parent b7cd586 commit 96c3d50
Showing 3 changed files with 36 additions and 20 deletions.
17 changes: 17 additions & 0 deletions code/agent/config/rviz_config.rviz
@@ -69,6 +69,23 @@ Visualization Manager:
PointCloud2: false
Value: true
Zoom Factor: 1
+ - Class: rviz/Image
+ Enabled: true
+ Image Rendering: background and overlay
+ Image Topic: /paf/hero/Center/segmented_image
+ Name: VisonNode Output
+ Overlay Alpha: 0.5
+ Queue Size: 2
+ Transport Hint: raw
+ Unreliable: false
+ Value: true
+ Visibility:
+ Grid: true
+ Imu: true
+ Path: true
+ PointCloud2: true
+ Value: true
+ Zoom Factor: 1
- Alpha: 1
Class: rviz_plugin_tutorials/Imu
Color: 204; 51; 204
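The new display listens on /paf/hero/Center/segmented_image with raw transport and a queue size of 2. Before opening rviz, a minimal standalone check (not part of this commit; node name and log text are illustrative) can confirm the topic is actually being published:

    import rospy
    from cv_bridge import CvBridge
    from sensor_msgs.msg import Image

    bridge = CvBridge()

    def on_image(msg):
        # convert back to a numpy array to verify shape and encoding
        img = bridge.imgmsg_to_cv2(msg, desired_encoding="rgb8")
        rospy.loginfo("segmented image %sx%s, frame_id=%s",
                      img.shape[1], img.shape[0], msg.header.frame_id)

    rospy.init_node("segmented_image_check", anonymous=True)
    rospy.Subscriber("/paf/hero/Center/segmented_image", Image, on_image, queue_size=2)
    rospy.spin()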
37 changes: 18 additions & 19 deletions code/perception/src/vision_node.py
@@ -19,7 +19,7 @@
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
import numpy as np
from ultralytics import NAS, YOLO, RTDETR, SAM, FastSAM
- # import rospy
+ import rospy

"""
VisionNode:
@@ -214,7 +214,7 @@ def handle_camera_image(self, image):
vision_result = self.predict_ultralytics(image)

# publish image to rviz
"""img_msg = self.bridge.cv2_to_imgmsg(vision_result,
img_msg = self.bridge.cv2_to_imgmsg(vision_result,
encoding="rgb8")
img_msg.header = image.header
side = rospy.resolve_name(img_msg.header.frame_id).split('/')[2]
@@ -225,9 +225,9 @@ def handle_camera_image(self, image):
if side == "Left":
self.publisher_left.publish(img_msg)
if side == "Right":
- self.publisher_right.publish(img_msg)"""
+ self.publisher_right.publish(img_msg)

- locals().clear()
+ # locals().clear()
# print(f"Published Image on Side: {side}")
pass
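The reactivated publishing path assumes that self.bridge and the side-specific publishers (self.publisher_center, self.publisher_left, self.publisher_right) already exist on the node; their construction is not visible in this diff. A hypothetical sketch of that setup, with topic names and queue sizes as assumptions only:

    import rospy
    from cv_bridge import CvBridge
    from sensor_msgs.msg import Image

    class VisionNodeSketch:
        def __init__(self):
            # hypothetical setup; the real VisionNode constructor is not shown here
            self.bridge = CvBridge()
            self.publisher_center = rospy.Publisher(
                "/paf/hero/Center/segmented_image", Image, queue_size=10)
            self.publisher_left = rospy.Publisher(
                "/paf/hero/Left/segmented_image", Image, queue_size=10)
            self.publisher_right = rospy.Publisher(
                "/paf/hero/Right/segmented_image", Image, queue_size=10)

The side is derived from the image header's frame_id via rospy.resolve_name(...).split('/')[2], so the Center topic is the one the new rviz display subscribes to.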

@@ -255,7 +255,7 @@ def handle_dist_array(self, dist_array):
desired_encoding='passthrough')
# print("RECEIVED DIST")
self.dist_arrays = dist_array
- locals().clear()
+ # locals().clear()

def predict_torch(self, image):
self.model.eval()
@@ -332,10 +332,10 @@ def predict_ultralytics(self, image):
# obj_dist4 = distances_copy[x4][y4].copy()
# print(obj_dist1, obj_dist3)

"""abs_distance = np.sqrt(
abs_distance = np.sqrt(
obj_dist1[0]**2 +
obj_dist1[1]**2 +
- obj_dist1[2]**2)"""
+ obj_dist1[2]**2)

# create 2d glass plane at object
# with box dimension
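The reactivated abs_distance is simply the Euclidean norm of the per-object LiDAR offset vector. A quick worked example with made-up numbers (the obj_dist1 values are illustrative only):

    import numpy as np

    obj_dist1 = np.array([8.0, 1.5, 0.5])  # illustrative (x, y, z) offset in metres
    abs_distance = np.sqrt(obj_dist1[0]**2 + obj_dist1[1]**2 + obj_dist1[2]**2)
    # equivalent to np.linalg.norm(obj_dist1); here sqrt(66.5) is about 8.15 m
    print(round(float(abs_distance), 2))  # 8.15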
@@ -379,24 +379,23 @@ def predict_ultralytics(self, image):
distance_output.append(float(np.inf))"""

c_boxes.append(torch.tensor(pixels))
- # c_labels.append(f"Class: {cls},"
- # f"Meters: {round(abs_distance, 2)},"
- # f"({round(float(obj_dist1[0]), 2)},"
- # f"{round(float(obj_dist1[1]), 2)},"
- # f"{round(float(obj_dist1[2]), 2)})")
+ c_labels.append(f"Class: {cls},"
+ f"Meters: {round(abs_distance, 2)},"
+ f"({round(float(obj_dist1[0]), 2)},"
+ f"{round(float(obj_dist3[1]), 2)})")

# print("DISTANCE_ARRAY: ", distance_output)
self.distance_publisher.publish(
Float32MultiArray(data=distance_output))

"""transposed_image = np.transpose(cv_image, (2, 0, 1))
transposed_image = np.transpose(cv_image, (2, 0, 1))
image_np_with_detections = torch.tensor(transposed_image,
dtype=torch.uint8)"""
dtype=torch.uint8)

if 9 in output[0].boxes.cls:
self.process_traffic_lights(output[0], cv_image, image.header)

"""c_boxes = torch.stack(c_boxes)
c_boxes = torch.stack(c_boxes)
box = draw_bounding_boxes(image_np_with_detections,
c_boxes,
c_labels,
@@ -405,9 +404,9 @@ def predict_ultralytics(self, image):
font_size=12)
np_box_img = np.transpose(box.detach().numpy(),
(1, 2, 0))
- box_img = cv2.cvtColor(np_box_img, cv2.COLOR_BGR2RGB)"""
- locals().clear()
- # return box_img
+ box_img = cv2.cvtColor(np_box_img, cv2.COLOR_BGR2RGB)
+ # locals().clear()
+ return box_img

# return output[0].plot()
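torchvision's draw_bounding_boxes expects a uint8 CHW image tensor, an (N, 4) box tensor in pixel xyxy order, and one label per box, which is why the code transposes the OpenCV image and stacks the per-detection pixel boxes before drawing. A self-contained sketch with dummy data (shapes, colors, and values are illustrative, not taken from this commit):

    import numpy as np
    import torch
    from torchvision.utils import draw_bounding_boxes

    cv_image = np.zeros((720, 1280, 3), dtype=np.uint8)        # HWC, as OpenCV delivers it
    chw = torch.tensor(np.transpose(cv_image, (2, 0, 1)), dtype=torch.uint8)
    boxes = torch.tensor([[100, 200, 300, 400]], dtype=torch.float)  # one xyxy box
    labels = ["Class: 2, Meters: 8.15"]
    annotated = draw_bounding_boxes(chw, boxes, labels, colors="blue", width=3)
    rgb = np.transpose(annotated.numpy(), (1, 2, 0))            # back to HWC for cv2/cv_bridge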

@@ -438,7 +437,7 @@ def process_traffic_lights(self, prediction, cv_image, image_header):
traffic_light_image.header = image_header
self.traffic_light_publisher.publish(traffic_light_image)

- locals().clear()
+ # locals().clear()

def create_mask(self, input_image, model_output):
output_predictions = torch.argmax(model_output, dim=0)
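create_mask starts from the per-pixel argmax of the model output; the segmented image that the new rviz display shows is presumably built by mapping those class indices through a colour palette. A hedged sketch of that step (the real body of create_mask is not visible in this diff, and the palette is an assumption):

    import numpy as np
    import torch

    def colorize_segmentation(model_output):
        # model_output: (num_classes, H, W) logits -> (H, W, 3) uint8 RGB image
        class_ids = torch.argmax(model_output, dim=0).cpu().numpy()
        palette = np.random.RandomState(0).randint(
            0, 255, size=(model_output.shape[0], 3), dtype=np.uint8)
        return palette[class_ids]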
2 changes: 1 addition & 1 deletion code/planning/src/local_planner/collision_check.py
@@ -183,7 +183,7 @@ def calculate_rule_of_thumb(emergency, speed):
Returns:
float: distance calculated with rule of thumb
"""
- reaction_distance = 0
+ reaction_distance = speed * 0.36
braking_distance = (speed * 0.36)**2
if emergency:
return reaction_distance + braking_distance / 2
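The fix replaces the hard-coded reaction distance of 0 with speed * 0.36, so the rule-of-thumb stopping distance now includes a reaction component on top of the braking distance. A worked example at 10 m/s (36 km/h); note that the non-emergency branch is not visible in this hunk and is assumed here to return the full sum:

    def rule_of_thumb_sketch(emergency, speed):
        # mirrors calculate_rule_of_thumb; the non-emergency return is an assumption
        reaction_distance = speed * 0.36
        braking_distance = (speed * 0.36) ** 2
        if emergency:
            return reaction_distance + braking_distance / 2
        return reaction_distance + braking_distance

    print(rule_of_thumb_sketch(False, 10.0))  # 3.6 + 12.96 = 16.56 m
    print(rule_of_thumb_sketch(True, 10.0))   # 3.6 + 6.48  = 10.08 m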
