diff --git a/build/docker-compose.yml b/build/docker-compose.yml
index d32f2854..a44d725e 100644
--- a/build/docker-compose.yml
+++ b/build/docker-compose.yml
@@ -59,6 +59,7 @@ services:
#command: bash -c "sleep 10 && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=/opt/leaderboard/data/routes_devtest.xml --agent=/opt/leaderboard/leaderboard/autoagents/npc_agent.py --host=carla-simulator --track=SENSORS"
# command: bash -c "sleep 10 && roslaunch agent/launch/dev.launch"
command: bash -c "sleep 10 && sudo chown -R carla:carla ../code/ && sudo chmod -R a+w ../code/ && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=/opt/leaderboard/data/routes_devtest.xml --agent=/workspace/code/agent/src/agent/agent.py --host=carla-simulator --track=MAP"
+
logging:
driver: "local"
environment:
diff --git a/code/agent/config/rviz_config.rviz b/code/agent/config/rviz_config.rviz
index 7064cc88..5c10eca9 100644
--- a/code/agent/config/rviz_config.rviz
+++ b/code/agent/config/rviz_config.rviz
@@ -63,11 +63,11 @@ Visualization Manager:
Unreliable: false
Value: true
Visibility:
- Grid: true
- Imu: true
- Path: true
- PointCloud2: true
- Value: true
+ Grid: false
+ Imu: false
+ Path: false
+ PointCloud2: false
+ Value: false
Zoom Factor: 1
- Class: rviz/Image
Enabled: true
@@ -327,4 +327,4 @@ Window Geometry:
collapsed: false
Width: 2488
X: 1992
- Y: 27
+ Y: 27
\ No newline at end of file
diff --git a/code/perception/launch/perception.launch b/code/perception/launch/perception.launch
index 0a24ba5e..3adc596e 100644
--- a/code/perception/launch/perception.launch
+++ b/code/perception/launch/perception.launch
@@ -33,15 +33,30 @@
-
-
+ - deeplabv3_resnet101
+ - yolov8x-seg
+ -->
+
+
diff --git a/code/perception/src/traffic_light_detection/dataset.dvc b/code/perception/src/traffic_light_detection/dataset.dvc
new file mode 100644
index 00000000..73aa6bd3
--- /dev/null
+++ b/code/perception/src/traffic_light_detection/dataset.dvc
@@ -0,0 +1,6 @@
+outs:
+- md5: 3a559397ebc58c1ecf142dea18d03367.dir
+ size: 13745063
+ nfiles: 2723
+ hash: md5
+ path: dataset
diff --git a/code/perception/src/traffic_light_detection/dvc.lock b/code/perception/src/traffic_light_detection/dvc.lock
new file mode 100644
index 00000000..d9f625ce
--- /dev/null
+++ b/code/perception/src/traffic_light_detection/dvc.lock
@@ -0,0 +1,34 @@
+schema: '2.0'
+stages:
+ train:
+ cmd: python src/traffic_light_detection/traffic_light_training.py
+ deps:
+ - path: dataset
+ md5: 3a559397ebc58c1ecf142dea18d03367.dir
+ size: 13745063
+ nfiles: 2723
+ - path: src
+ hash: md5
+ md5: b6c9cb867c89ad6e86403d9c33538136.dir
+ size: 23777
+ nfiles: 10
+ params:
+ params.yaml:
+ train:
+ epochs: 100
+ batch_size: 32
+ outs:
+ - path: dvclive/metrics.json
+ hash: md5
+ md5: af33de699558fbfd3edee1607ba88f81
+ size: 218
+ - path: dvclive/plots
+ hash: md5
+ md5: 774919de9e9d6820ac6821d0819829c1.dir
+ size: 8900
+ nfiles: 4
+ - path: models
+ hash: md5
+ md5: ee67bac2f189d2cc5a199d91ba3295ac.dir
+ size: 10815
+ nfiles: 1
diff --git a/code/perception/src/traffic_light_detection/dvc.yaml b/code/perception/src/traffic_light_detection/dvc.yaml
new file mode 100644
index 00000000..d08afa7e
--- /dev/null
+++ b/code/perception/src/traffic_light_detection/dvc.yaml
@@ -0,0 +1,21 @@
+stages:
+ train:
+ cmd: python src/traffic_light_detection/traffic_light_training.py
+ deps:
+ - dataset
+ - src
+ params:
+ - params.yaml:
+ outs:
+ - models
+ metrics:
+ - dvclive/metrics.json:
+ cache: false
+ plots:
+ - dvclive/plots:
+ cache: false
+metrics:
+- dvclive/metrics.json
+plots:
+- dvclive/plots/metrics:
+ x: step
diff --git a/code/perception/src/traffic_light_detection/src/data_generation/transforms.py b/code/perception/src/traffic_light_detection/src/data_generation/transforms.py
index f1f992d2..41dc8bfc 100644
--- a/code/perception/src/traffic_light_detection/src/data_generation/transforms.py
+++ b/code/perception/src/traffic_light_detection/src/data_generation/transforms.py
@@ -28,7 +28,7 @@ def __call__(self, image):
image = torchvision.transforms.Pad((0, pad))(image)
else:
image = torchvision.transforms.Pad((pad, 0))(image)
- image = torchvision.transforms.Resize(self.size)(image)
+ image = torchvision.transforms.Resize(self.size, antialias=True)(image)
return image
diff --git a/code/perception/src/traffic_light_detection/src/traffic_light_detection/classification_model.py b/code/perception/src/traffic_light_detection/src/traffic_light_detection/classification_model.py
index 9c5fb339..f44d2c31 100644
--- a/code/perception/src/traffic_light_detection/src/traffic_light_detection/classification_model.py
+++ b/code/perception/src/traffic_light_detection/src/traffic_light_detection/classification_model.py
@@ -59,10 +59,10 @@ def load_model(cfg):
if path is not None:
try:
state_dict = torch.load(path)
- model.load_state_dict(state_dict).eval()
+ model.load_state_dict(state_dict)
print(f"Pretrained model loaded from {path}")
return model
- except (Exception, ):
- print(f"No pretrained model found at {path}. "
+ except Exception as e:
+ print(f"No pretrained model found at {path}: {e}\n"
f"Created new model with random weights.")
return model.eval()
diff --git a/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_inference.py b/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_inference.py
index 40dd4c24..4631660b 100644
--- a/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_inference.py
+++ b/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_inference.py
@@ -1,5 +1,4 @@
import argparse
-from pathlib import Path
import torch.cuda
import torchvision.transforms as t
@@ -24,6 +23,9 @@ def parse_args():
'model_acc_99.53_val_100.0.pt',
help='path to pretrained model',
type=str)
+ parser.add_argument('--image', default=None,
+ help='path to an image, e.g. dataset/val/green/green_83.png',
+ type=str)
return parser.parse_args()
@@ -66,8 +68,7 @@ def __call__(self, img):
# main function for testing purposes
if __name__ == '__main__':
args = parse_args()
- image_path = str(Path(__file__).resolve().parents[2].resolve())
- image_path += "/dataset/val/green/green_83.png"
+ image_path = args.image
image = load_image(image_path)
classifier = TrafficLightInference(args.model)
pred = classifier(image)
diff --git a/code/perception/src/vision_node.py b/code/perception/src/vision_node.py
index 726ea4de..d736253a 100755
--- a/code/perception/src/vision_node.py
+++ b/code/perception/src/vision_node.py
@@ -19,6 +19,7 @@
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
import numpy as np
from time import perf_counter
+from ultralytics import NAS, YOLO, RTDETR, SAM, FastSAM
"""
VisionNode:
@@ -55,17 +56,32 @@ def __init__(self, name, **kwargs):
weights=DeepLabV3_ResNet101_Weights.DEFAULT),
DeepLabV3_ResNet101_Weights.DEFAULT,
"segmentation",
- "pyTorch")
+ "pyTorch"),
+ 'yolov8n': (YOLO, "yolov8n.pt", "detection", "ultralytics"),
+ 'yolov8s': (YOLO, "yolov8s.pt", "detection", "ultralytics"),
+ 'yolov8m': (YOLO, "yolov8m.pt", "detection", "ultralytics"),
+ 'yolov8l': (YOLO, "yolov8l.pt", "detection", "ultralytics"),
+ 'yolov8x': (YOLO, "yolov8x.pt", "detection", "ultralytics"),
+ 'yolo_nas_l': (NAS, "yolo_nas_l.pt", "detection", "ultralytics"),
+ 'yolo_nas_m': (NAS, "yolo_nas_m.pt", "detection", "ultralytics"),
+ 'yolo_nas_s': (NAS, "yolo_nas_s.pt", "detection", "ultralytics"),
+ 'rtdetr-l': (RTDETR, "rtdetr-l.pt", "detection", "ultralytics"),
+ 'rtdetr-x': (RTDETR, "rtdetr-x.pt", "detection", "ultralytics"),
+ 'yolov8x-seg': (YOLO, "yolov8x-seg.pt", "segmentation",
+ "ultralytics"),
+ 'sam_l': (SAM, "sam_l.pt", "detection", "ultralytics"),
+ 'FastSAM-x': (FastSAM, "FastSAM-x.pt", "detection", "ultralytics"),
+
}
+ print(torch.__version__)
+
# general setup
self.bridge = CvBridge()
self.role_name = self.get_param("role_name", "hero")
self.side = self.get_param("side", "Center")
- # self.device = torch.device("cuda"
- # if torch.cuda.is_available() else "cpu") Cuda Memory Issues
- self.device = torch.device("cpu")
- print("VisionNode working on: ", self.device)
+ self.device = torch.device("cuda"
+ if torch.cuda.is_available() else "cpu")
# publish / subscribe setup
self.setup_camera_subscriptions()
@@ -80,9 +96,22 @@ def __init__(self, name, **kwargs):
self.type = model_info[2]
self.framework = model_info[3]
print("Vision Node Configuration:")
+ print("Device -> ", self.device)
print(f"Model -> {self.get_param('model')},")
print(f"Type -> {self.type}, Framework -> {self.framework}")
- self.model.to(self.device)
+ if torch.cuda.is_available():
+     torch.cuda.memory.set_per_process_memory_fraction(0.1)
+
+ # pyTorch and CUDA setup
+ if self.framework == "pyTorch":
+ for param in self.model.parameters():
+ param.requires_grad = False
+ self.model.to(self.device)
+
+ # ultralytics setup
+ if self.framework == "ultralytics":
+ self.model = self.model(self.weights)
+
+ # tensorflow setup
def setup_camera_subscriptions(self):
self.new_subscription(
@@ -101,6 +130,30 @@ def setup_camera_publishers(self):
def handle_camera_image(self, image):
startTime = perf_counter()
+
+ # free up cuda memory
+ if self.device == "cuda":
+ torch.cuda.empty_cache()
+
+ print("Before Model: ", perf_counter() - startTime)
+
+ if self.framework == "pyTorch":
+ vision_result = self.predict_torch(image)
+
+ if self.framework == "ultralytics":
+ vision_result = self.predict_ultralytics(image)
+
+ print("After Model: ", perf_counter() - startTime)
+
+ # publish image to rviz
+ img_msg = self.bridge.cv2_to_imgmsg(vision_result,
+ encoding="passthrough")
+ img_msg.header = image.header
+ self.publisher.publish(img_msg)
+
+ pass
+
+ def predict_torch(self, image):
self.model.eval()
cv_image = self.bridge.imgmsg_to_cv2(img_msg=image,
desired_encoding='passthrough')
@@ -114,39 +167,41 @@ def handle_camera_image(self, image):
input_image = preprocess(cv_image).unsqueeze(dim=0)
input_image = input_image.to(self.device)
- print("Before Model: ", perf_counter() - startTime)
prediction = self.model(input_image)
- print("After Model: ", perf_counter() - startTime)
+
if (self.type == "detection"):
vision_result = self.apply_bounding_boxes(cv_image, prediction[0])
if (self.type == "segmentation"):
vision_result = self.create_mask(cv_image, prediction['out'])
- img_msg = self.bridge.cv2_to_imgmsg(vision_result,
- encoding="passthrough")
- img_msg.header = image.header
+ return vision_result
- self.publisher.publish(img_msg)
- print("After Publish: ", perf_counter() - startTime)
+ def predict_ultralytics(self, image):
+ cv_image = self.bridge.imgmsg_to_cv2(img_msg=image,
+ desired_encoding='passthrough')
+ cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
+ print(cv_image.shape)
- pass
+ output = self.model(cv_image)
+
+ return output[0].plot()
def create_mask(self, input_image, model_output):
output_predictions = torch.argmax(model_output, dim=0)
-
for i in range(21):
output_predictions[i] = output_predictions[i] == i
output_predictions = output_predictions.to(dtype=torch.bool)
- input_image = t.ToTensor()(input_image)
- input_image = input_image.to(dtype=torch.uint8)
print(output_predictions.shape)
- print(input_image.shape)
- segmented_image = draw_segmentation_masks(input_image,
- output_predictions)
- cv_segmented = cv2.cvtColor(segmented_image.detach().numpy(),
- cv2.COLOR_BGR2RGB)
+ transposed_image = np.transpose(input_image, (2, 0, 1))
+ tensor_image = torch.tensor(transposed_image)
+ tensor_image = tensor_image.to(dtype=torch.uint8)
+ segmented_image = draw_segmentation_masks(tensor_image,
+ output_predictions,
+ alpha=0.6)
+ cv_segmented = segmented_image.detach().cpu().numpy()
+ cv_segmented = np.transpose(cv_segmented, (1, 2, 0))
return cv_segmented
def apply_bounding_boxes(self, input_image, model_output):
diff --git a/code/planning/behavior_agent/src/behavior_agent/behavior_tree.py b/code/planning/behavior_agent/src/behavior_agent/behavior_tree.py
index 4df7e287..1ac702c1 100755
--- a/code/planning/behavior_agent/src/behavior_agent/behavior_tree.py
+++ b/code/planning/behavior_agent/src/behavior_agent/behavior_tree.py
@@ -61,47 +61,7 @@ def grow_a_tree(role_name):
("Leave Change")
])
]),
- Inverter(Selector("Overtaking", children=[
- behaviours.traffic_objects.NotSlowedByCarInFront
- ("Not Slowed By Car in Front?"),
- Selector("Number of Lanes", children=[
- Sequence("Multi Lane", children=[
- behaviours.road_features.MultiLane("Multi Lane?"),
- behaviours.road_features.LeftLaneAvailable
- ("Left Lane Available?"),
- behaviours.traffic_objects.WaitLeftLaneFree
- ("Wait for Left Lane Free"),
- behaviours.maneuvers.SwitchLaneLeft("Switch Lane Left")
- ]),
- Sequence("Single Lane", children=[
- behaviours.road_features.SingleLineDotted
- ("Single Lane with dotted Line?"),
- behaviours.traffic_objects.WaitLeftLaneFree
- ("Wait for Left Lane Free"),
- behaviours.maneuvers.SwitchLaneLeft
- ("Switch Lane Left"),
- Selector("Driving on Left Side", children=[
- Sequence("Overtake", children=[
- behaviours.traffic_objects.OvertakingPossible
- ("Overtaking Possible?"),
- behaviours.maneuvers.Overtake("Overtake"),
- behaviours.maneuvers.SwitchLaneRight
- ("Switch Lane Right")
- ]),
- behaviours.maneuvers.SwitchLaneRight("Switch Lane Right")
- ])
- ])
- ]),
- Running("Can't Overtake")
- ])),
- Sequence("Back to Right Lane", children=[
- behaviours.road_features.RightLaneAvailable("Right Lane Available"),
- behaviours.traffic_objects.NotSlowedByCarInFrontRight
- ("Not Slowed By Car in Front Right?"),
- behaviours.traffic_objects.
- WaitRightLaneFree("Wait for Right Lane Free"),
- behaviours.maneuvers.SwitchLaneRight("Switch Lane Right")
- ])
+
]),
behaviours.maneuvers.Cruise("Cruise")
])
diff --git a/code/planning/behavior_agent/src/behavior_agent/behaviours/behavior_speed.py b/code/planning/behavior_agent/src/behavior_agent/behaviours/behavior_speed.py
new file mode 100755
index 00000000..4eb9cf47
--- /dev/null
+++ b/code/planning/behavior_agent/src/behavior_agent/behaviours/behavior_speed.py
@@ -0,0 +1,66 @@
+
+from collections import namedtuple
+
+
+def convert_to_ms(speed):
+ return speed / 3.6
+
+
+Behavior = namedtuple("Behavior", ("name", "speed"))
+
+# Changed target_speed_pub to curr_behavior_pub
+
+# Intersection - Behaviors
+
+# Approach
+
+int_app_init = Behavior("int_app_init", convert_to_ms(30.0))
+
+# No Traffic Light or Sign -> stop dynamically at Stopline
+int_app_no_sign = Behavior("int_app_no_sign", -2)
+
+int_app_green = Behavior("int_app_green", convert_to_ms(30.0))
+
+# Wait
+
+int_wait = Behavior("int_wait", 0)
+
+# Enter
+
+int_enter_no_light = Behavior("int_enter_no_light", convert_to_ms(50.0))
+
+int_enter_empty_str = Behavior("int_enter_empty_string", convert_to_ms(18.0))
+
+int_enter_light = Behavior("int_enter_light", convert_to_ms(50.0))
+
+# Exit
+
+int_exit = Behavior("int_exit", -1) # Get SpeedLimit dynamically
+
+
+# Lane Change
+
+# Approach
+
+lc_app_init = Behavior("lc_app_init", convert_to_ms(30.0))
+
+
+# TODO: Find out purpose of v_stop in lane_change (lines: 105 - 128)
+lc_app_blocked = Behavior("lc_app_blocked", 0.5)
+
+# Wait
+
+# Has a publisher but doesn't publish anything?
+
+# Enter
+
+lc_enter_init = Behavior("lc_enter_init", convert_to_ms(20.0))
+
+# Exit
+
+lc_exit = Behavior("lc_exit", -1) # Get SpeedLimit dynamically
+
+
+# Cruise
+
+cruise = Behavior("Cruise", -1)
diff --git a/code/planning/behavior_agent/src/behavior_agent/behaviours/intersection.py b/code/planning/behavior_agent/src/behavior_agent/behaviours/intersection.py
index b8d7e8db..22697f27 100755
--- a/code/planning/behavior_agent/src/behavior_agent/behaviours/intersection.py
+++ b/code/planning/behavior_agent/src/behavior_agent/behaviours/intersection.py
@@ -1,9 +1,11 @@
import py_trees
import numpy as np
-from std_msgs.msg import Float32
+from std_msgs.msg import String
import rospy
+from . import behavior_speed as bs
+
"""
Source: https://github.com/ll7/psaf2
"""
@@ -41,9 +43,9 @@ def setup(self, timeout):
successful
:return: True, as the set up is successful.
"""
- self.target_speed_pub = rospy.Publisher("/paf/hero/"
- "max_tree_velocity",
- Float32, queue_size=1)
+ self.curr_behavior_pub = rospy.Publisher("/paf/hero/"
+ "curr_behavior",
+ String, queue_size=1)
self.blackboard = py_trees.blackboard.Blackboard()
return True
@@ -66,7 +68,7 @@ def initialise(self):
self.traffic_light_distance = np.inf
self.traffic_light_status = ''
self.virtual_stopline_distance = np.inf
- self.target_speed_pub.publish(convert_to_ms(30.0))
+ self.curr_behavior_pub.publish(bs.int_app_init.name)
self.last_virtual_distance = np.inf
def update(self):
@@ -117,16 +119,7 @@ def update(self):
self.virtual_stopline_distance = 0.0
rospy.loginfo(f"Stopline distance: {self.virtual_stopline_distance}")
-
target_distance = 3.0
- # calculate speed needed for stopping
- v_stop = max(convert_to_ms(10.),
- convert_to_ms((self.virtual_stopline_distance / 30)
- * 50))
- if v_stop > convert_to_ms(50.0):
- v_stop = convert_to_ms(50.0)
- if self.virtual_stopline_distance < target_distance:
- v_stop = 0.0
# stop when there is no or red/yellow traffic light or a stop sign is
# detected
if self.traffic_light_status == '' \
@@ -135,13 +128,13 @@ def update(self):
or (self.stop_sign_detected and
not self.traffic_light_detected):
- rospy.loginfo(f"slowing down: {v_stop}")
- self.target_speed_pub.publish(v_stop)
+ rospy.loginfo("slowing down!")
+ self.curr_behavior_pub.publish(bs.int_app_no_sign.name)
# approach slowly when traffic light is green as traffic lights are
# higher priority than traffic signs this behavior is desired
if self.traffic_light_status == 'green':
- self.target_speed_pub.publish(convert_to_ms(30))
+ self.curr_behavior_pub.publish(bs.int_app_green.name)
# get speed
speedometer = self.blackboard.get("/carla/hero/Speed")
@@ -224,9 +217,9 @@ def setup(self, timeout):
successful
:return: True, as the set up is successful.
"""
- self.target_speed_pub = rospy.Publisher("/paf/hero/"
- "max_tree_velocity", Float32,
- queue_size=1)
+ self.curr_behavior_pub = rospy.Publisher("/paf/hero/"
+ "curr_behavior", String,
+ queue_size=1)
self.blackboard = py_trees.blackboard.Blackboard()
return True
@@ -279,7 +272,7 @@ def update(self):
if traffic_light_status == "red" or \
traffic_light_status == "yellow":
rospy.loginfo(f"Light Status: {traffic_light_status}")
- self.target_speed_pub.publish(0)
+ self.curr_behavior_pub.publish(bs.int_wait.name)
return py_trees.common.Status.RUNNING
else:
rospy.loginfo(f"Light Status: {traffic_light_status}")
@@ -333,9 +326,9 @@ def setup(self, timeout):
successful
:return: True, as the set up is successful.
"""
- self.target_speed_pub = rospy.Publisher("/paf/hero/"
- "max_tree_velocity", Float32,
- queue_size=1)
+ self.curr_behavior_pub = rospy.Publisher("/paf/hero/"
+ "curr_behavior", String,
+ queue_size=1)
self.blackboard = py_trees.blackboard.Blackboard()
return True
@@ -352,16 +345,16 @@ def initialise(self):
rospy.loginfo("Enter Intersection")
light_status_msg = self.blackboard.get("/paf/hero/traffic_light")
if light_status_msg is None:
- self.target_speed_pub.publish(convert_to_ms(50.0))
+ self.curr_behavior_pub.publish(bs.int_enter_no_light.name)
return True
else:
traffic_light_status = light_status_msg.color
if traffic_light_status == "":
- self.target_speed_pub.publish(convert_to_ms(18.0))
+ self.curr_behavior_pub.publish(bs.int_enter_empty_str.name)
else:
rospy.loginfo(f"Light Status: {traffic_light_status}")
- self.target_speed_pub.publish(convert_to_ms(50.0))
+ self.curr_behavior_pub.publish(bs.int_enter_light.name)
def update(self):
"""
@@ -433,9 +426,9 @@ def setup(self, timeout):
successful
:return: True, as the set up is successful.
"""
- self.target_speed_pub = rospy.Publisher("/paf/hero/"
- "max_tree_velocity", Float32,
- queue_size=1)
+ self.curr_behavior_pub = rospy.Publisher("/paf/hero/"
+ "curr_behavior", String,
+ queue_size=1)
self.blackboard = py_trees.blackboard.Blackboard()
return True
@@ -453,16 +446,15 @@ def initialise(self):
rospy.loginfo("Leave Intersection")
street_speed_msg = self.blackboard.get("/paf/hero/speed_limit")
if street_speed_msg is not None:
- self.target_speed_pub.publish(street_speed_msg.data)
+ # self.curr_behavior_pub.publish(street_speed_msg.data)
+ self.curr_behavior_pub.publish(bs.int_exit.name)
return True
def update(self):
"""
When is this called?
Every time your behaviour is ticked.
- What to do here?
- - Triggering, checking, monitoring. Anything...but do not block!
- - Set a feedback message
+ What to do here?
- return a py_trees.common.Status.[RUNNING, SUCCESS, FAILURE]
Abort this subtree
:return: py_trees.common.Status.FAILURE, to exit this subtree
diff --git a/code/planning/behavior_agent/src/behavior_agent/behaviours/lane_change.py b/code/planning/behavior_agent/src/behavior_agent/behaviours/lane_change.py
index d4b84d19..4433cc2e 100644
--- a/code/planning/behavior_agent/src/behavior_agent/behaviours/lane_change.py
+++ b/code/planning/behavior_agent/src/behavior_agent/behaviours/lane_change.py
@@ -1,11 +1,13 @@
import py_trees
import numpy as np
-from std_msgs.msg import Float32
+from std_msgs.msg import String
# from nav_msgs.msg import Odometry
# from custom_carla_msgs.srv import UpdateLocalPath
import rospy
+from . import behavior_speed as bs
+
"""
Source: https://github.com/ll7/psaf2
"""
@@ -43,9 +45,9 @@ def setup(self, timeout):
successful
:return: True, as the set up is successful.
"""
- self.target_speed_pub = rospy.Publisher("/paf/hero/"
- "max_tree_velocity",
- Float32, queue_size=1)
+ self.curr_behavior_pub = rospy.Publisher("/paf/hero/"
+ "curr_behavior",
+ String, queue_size=1)
# rospy.wait_for_service('update_local_path') # TODO is this necessary?
# self.update_local_path =
# rospy.ServiceProxy("update_local_path", UpdateLocalPath)
@@ -68,7 +70,7 @@ def initialise(self):
self.change_detected = False
self.change_distance = np.inf
self.virtual_change_distance = np.inf
- self.target_speed_pub.publish(convert_to_ms(30.0))
+ self.curr_behavior_pub.publish(bs.lc_app_init.name)
def update(self):
"""
@@ -98,12 +100,6 @@ def update(self):
if self.change_distance != np.inf and self.change_detected:
self.virtual_change_distance = self.change_distance
- # calculate speed needed for stopping
- v_stop = max(convert_to_ms(5.),
- convert_to_ms((self.virtual_change_distance / 30) ** 1.5
- * 50))
- if v_stop > convert_to_ms(50.0):
- v_stop = convert_to_ms(30.0)
# slow down before lane change
if self.virtual_change_distance < 15.0:
if self.change_option == 5:
@@ -120,9 +116,8 @@ def update(self):
# self.update_local_path(leave_intersection=True)
return py_trees.common.Status.SUCCESS
else:
- v_stop = 0.5
- rospy.loginfo(f"Change blocked slowing down: {v_stop}")
- self.target_speed_pub.publish(v_stop)
+ rospy.loginfo("Change blocked slowing down")
+ self.curr_behavior_pub.publish(bs.lc_app_blocked.name)
# get speed
speedometer = self.blackboard.get("/carla/hero/Speed")
@@ -194,9 +189,9 @@ def setup(self, timeout):
successful
:return: True, as the set up is successful.
"""
- self.target_speed_pub = rospy.Publisher("/paf/hero/"
- "max_tree_velocity", Float32,
- queue_size=1)
+ self.curr_behavior_pub = rospy.Publisher("/paf/hero/"
+ "curr_behavior", String,
+ queue_size=1)
self.blackboard = py_trees.blackboard.Blackboard()
return True
@@ -303,9 +298,9 @@ def setup(self, timeout):
successful
:return: True, as the set up is successful.
"""
- self.target_speed_pub = rospy.Publisher("/paf/hero/"
- "max_tree_velocity", Float32,
- queue_size=1)
+ self.curr_behavior_pub = rospy.Publisher("/paf/hero/"
+ "curr_behavior", String,
+ queue_size=1)
# rospy.wait_for_service('update_local_path')
# self.update_local_path = rospy.ServiceProxy("update_local_path",
# UpdateLocalPath)
@@ -323,7 +318,7 @@ def initialise(self):
the intersection.
"""
rospy.loginfo("Enter next Lane")
- self.target_speed_pub.publish(convert_to_ms(20.0))
+ self.curr_behavior_pub.publish(bs.lc_enter_init.name)
def update(self):
"""
@@ -396,9 +391,9 @@ def setup(self, timeout):
successful
:return: True, as the set up is successful.
"""
- self.target_speed_pub = rospy.Publisher("/paf/hero/"
- "max_tree_velocity", Float32,
- queue_size=1)
+ self.curr_behavior_pub = rospy.Publisher("/paf/hero/"
+ "curr_behavior", String,
+ queue_size=1)
self.blackboard = py_trees.blackboard.Blackboard()
return True
@@ -415,7 +410,8 @@ def initialise(self):
rospy.loginfo("Leave Change")
street_speed_msg = self.blackboard.get("/paf/hero/speed_limit")
if street_speed_msg is not None:
- self.target_speed_pub.publish(street_speed_msg.data)
+ # self.curr_behavior_pub.publish(street_speed_msg.data)
+ self.curr_behavior_pub.publish(bs.lc_exit.name)
return True
def update(self):
diff --git a/code/planning/behavior_agent/src/behavior_agent/behaviours/maneuvers.py b/code/planning/behavior_agent/src/behavior_agent/behaviours/maneuvers.py
index ce672563..5a17c25a 100755
--- a/code/planning/behavior_agent/src/behavior_agent/behaviours/maneuvers.py
+++ b/code/planning/behavior_agent/src/behavior_agent/behaviours/maneuvers.py
@@ -1,4 +1,9 @@
import py_trees
+import rospy
+from std_msgs.msg import String
+
+from . import behavior_speed as bs
+# from behavior_agent.msg import BehaviorSpeed
"""
Source: https://github.com/ll7/psaf2
@@ -264,6 +269,7 @@ def __init__(self, name):
:param name: name of the behaviour
"""
super(Cruise, self).__init__(name)
+ rospy.loginfo("Cruise started")
def setup(self, timeout):
"""
@@ -277,6 +283,11 @@ def setup(self, timeout):
successful
:return: True, as there is nothing to set up.
"""
+
+ self.curr_behavior_pub = rospy.Publisher("/paf/hero/"
+ "curr_behavior",
+ String, queue_size=1)
+
self.blackboard = py_trees.blackboard.Blackboard()
return True
@@ -290,6 +301,7 @@ def initialise(self):
Any initialisation you need before putting your behaviour to work.
:return: True
"""
+ rospy.loginfo("Starting Cruise")
return True
def update(self):
@@ -308,6 +320,7 @@ def update(self):
:return: py_trees.common.Status.RUNNING, keeps the decision tree from
finishing
"""
+ self.curr_behavior_pub.publish(bs.cruise.name)
return py_trees.common.Status.RUNNING
def terminate(self, new_status):
diff --git a/code/planning/local_planner/launch/local_planner.launch b/code/planning/local_planner/launch/local_planner.launch
index 140322e9..5fb371a5 100644
--- a/code/planning/local_planner/launch/local_planner.launch
+++ b/code/planning/local_planner/launch/local_planner.launch
@@ -11,4 +11,8 @@
-->
+
+
+
+
diff --git a/code/planning/local_planner/src/behavior_speed.py b/code/planning/local_planner/src/behavior_speed.py
new file mode 100755
index 00000000..4eb9cf47
--- /dev/null
+++ b/code/planning/local_planner/src/behavior_speed.py
@@ -0,0 +1,66 @@
+
+from collections import namedtuple
+
+
+def convert_to_ms(speed):
+ return speed / 3.6
+
+
+Behavior = namedtuple("Behavior", ("name", "speed"))
+
+# Changed target_speed_pub to curr_behavior_pub
+
+# Intersection - Behaviors
+
+# Approach
+
+int_app_init = Behavior("int_app_init", convert_to_ms(30.0))
+
+# No Traffic Light or Sign -> stop dynamically at Stopline
+int_app_no_sign = Behavior("int_app_no_sign", -2)
+
+int_app_green = Behavior("int_app_green", convert_to_ms(30.0))
+
+# Wait
+
+int_wait = Behavior("int_wait", 0)
+
+# Enter
+
+int_enter_no_light = Behavior("int_enter_no_light", convert_to_ms(50.0))
+
+int_enter_empty_str = Behavior("int_enter_empty_string", convert_to_ms(18.0))
+
+int_enter_light = Behavior("int_enter_light", convert_to_ms(50.0))
+
+# Exit
+
+int_exit = Behavior("int_exit", -1) # Get SpeedLimit dynamically
+
+
+# Lane Change
+
+# Approach
+
+lc_app_init = Behavior("lc_app_init", convert_to_ms(30.0))
+
+
+# TODO: Find out purpose of v_stop in lane_change (lines: 105 - 128)
+lc_app_blocked = Behavior("lc_app_blocked", 0.5)
+
+# Wait
+
+# Has a publisher but doesn't publish anything?
+
+# Enter
+
+lc_enter_init = Behavior("lc_enter_init", convert_to_ms(20.0))
+
+# Exit
+
+lc_exit = Behavior("lc_exit", -1) # Get SpeedLimit dynamically
+
+
+# Cruise
+
+cruise = Behavior("Cruise", -1)
diff --git a/code/planning/local_planner/src/motion_planning.py b/code/planning/local_planner/src/motion_planning.py
new file mode 100755
index 00000000..7b1e5982
--- /dev/null
+++ b/code/planning/local_planner/src/motion_planning.py
@@ -0,0 +1,207 @@
+#!/usr/bin/env python
+# import rospy
+# import tf.transformations
+import ros_compatibility as roscomp
+from ros_compatibility.node import CompatibleNode
+from rospy import Publisher, Subscriber
+from std_msgs.msg import String, Float32
+import numpy as np
+
+# from behavior_agent.msg import BehaviorSpeed
+from perception.msg import Waypoint, LaneChange
+import behavior_speed as bs
+
+# from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
+# from carla_msgs.msg import CarlaRoute # , CarlaWorldInfo
+# from nav_msgs.msg import Path
+# from std_msgs.msg import String
+# from std_msgs.msg import Float32MultiArray
+
+
+def convert_to_ms(speed):
+ return speed / 3.6
+
+
+class MotionPlanning(CompatibleNode):
+ """
+ This node selects speeds according to the behavior in the Decision Tree
+ and the ACC.
+ Later this Node should compute a local Trajectory and forward
+ it to the Acting.
+ """
+
+ def __init__(self):
+ super(MotionPlanning, self).__init__('MotionPlanning')
+ self.role_name = self.get_param("role_name", "hero")
+ self.control_loop_rate = self.get_param("control_loop_rate", 0.5)
+ self.logdebug("MotionPlanning started")
+
+ self.target_speed = 0.0
+ self.__curr_behavior = None
+ self.__acc_speed = 0.0
+ self.__stopline = None # (Distance, isStopline)
+ self.__change_point = None # (Distance, isLaneChange, roadOption)
+
+ # Subscriber
+ self.curr_behavior_sub: Subscriber = self.new_subscription(
+ String,
+ f"/paf/{self.role_name}/curr_behavior",
+ self.__set_curr_behavior,
+ qos_profile=1)
+
+ self.acc_sub: Subscriber = self.new_subscription(
+ Float32,
+ f"/paf/{self.role_name}/acc_velocity",
+ self.__set_acc_speed,
+ qos_profile=1)
+
+ self.stopline_sub: Subscriber = self.new_subscription(
+ Waypoint,
+ f"/paf/{self.role_name}/waypoint_distance",
+ self.__set_stopline,
+ qos_profile=1)
+
+ self.change_point_sub: Subscriber = self.new_subscription(
+ LaneChange,
+ f"/paf/{self.role_name}/lane_change_distance",
+ self.__set_change_point,
+ qos_profile=1)
+
+ # Publisher
+ self.velocity_pub: Publisher = self.new_publisher(
+ Float32,
+ f"/paf/{self.role_name}/target_velocity",
+ qos_profile=1)
+
+ def update_target_speed(self, acc_speed, behavior):
+ be_speed = self.get_speed_by_behavior(behavior)
+
+ self.target_speed = min(be_speed, acc_speed)
+ self.velocity_pub.publish(self.target_speed)
+
+ def __set_acc_speed(self, data: Float32):
+ self.__acc_speed = data.data
+
+ def __set_curr_behavior(self, data: String):
+ self.__curr_behavior = data.data
+
+ def __set_stopline(self, data: Waypoint):
+ if data is not None:
+ self.__stopline = (data.distance, data.isStopLine)
+
+ def __set_change_point(self, data: LaneChange):
+ if data is not None:
+ self.__change_point = \
+ (data.distance, data.isLaneChange, data.roadOption)
+
+ def get_speed_by_behavior(self, behavior: str) -> float:
+ speed = 0.0
+ split = "_"
+ short_behavior = behavior.partition(split)[0]
+ if short_behavior == "int":
+ speed = self.__get_speed_intersection(behavior)
+ elif short_behavior == "lc":
+ speed = self.__get_speed_lanechange(behavior)
+ else:
+ speed = self.__get_speed_cruise()
+
+ return speed
+
+ def __get_speed_intersection(self, behavior: str) -> float:
+ speed = 0.0
+ if behavior == bs.int_app_init.name:
+ speed = bs.int_app_init.speed
+ elif behavior == bs.int_app_green.name:
+ speed = bs.int_app_green.speed
+ elif behavior == bs.int_app_no_sign.name:
+ speed = self.__calc_speed_to_stop_intersection()
+ elif behavior == bs.int_wait.name:
+ speed = bs.int_wait.speed
+ elif behavior == bs.int_enter_no_light.name:
+ speed = bs.int_enter_no_light.speed
+ elif behavior == bs.int_enter_empty_str.name:
+ speed = bs.int_enter_empty_str.speed
+ elif behavior == bs.int_enter_light.name:
+ speed = bs.int_enter_light.speed
+ elif behavior == bs.int_exit.name:
+ speed = bs.int_exit.speed
+
+ return speed
+
+ def __get_speed_lanechange(self, behavior: str) -> float:
+ speed = 0.0
+ if behavior == bs.lc_app_init.name:
+ speed = bs.lc_app_init.speed
+ elif behavior == bs.lc_app_blocked.name:
+ speed = bs.lc_app_blocked.speed # calc_speed_to_stop_lanechange()
+ elif behavior == bs.lc_enter_init.name:
+ speed = bs.lc_enter_init.speed
+ elif behavior == bs.lc_exit.name:
+ speed = bs.lc_exit.speed
+
+ return speed
+
+ def __get_speed_cruise(self) -> float:
+ return self.__acc_speed
+
+ def __calc_speed_to_stop_intersection(self) -> float:
+ target_distance = 3.0
+ virtual_stopline_distance = self.__calc_virtual_stopline()
+ # calculate speed needed for stopping
+ v_stop = max(convert_to_ms(10.),
+ convert_to_ms((virtual_stopline_distance / 30)
+ * 50))
+ if v_stop > convert_to_ms(50.0):
+ v_stop = convert_to_ms(50.0)
+ if virtual_stopline_distance < target_distance:
+ v_stop = 0.0
+ return v_stop
+
+ # TODO: Find out purpose
+ def __calc_speed_to_stop_lanechange(self) -> float:
+ if self.__change_point[0] != np.inf and self.__change_point[1]:
+ stopline = self.__change_point[0]
+ else:
+ return 100
+
+ v_stop = max(convert_to_ms(5.),
+ convert_to_ms((stopline / 30) ** 1.5
+ * 50))
+ if v_stop > convert_to_ms(50.0):
+ v_stop = convert_to_ms(30.0)
+ return v_stop
+
+ def __calc_virtual_stopline(self) -> float:
+ if self.__stopline[0] != np.inf and self.__stopline[1]:
+ return self.__stopline[0]
+ elif self.traffic_light_detected:
+ return self.traffic_light_distance
+ else:
+ return 0.0
+
+ def run(self):
+ """
+ Control loop
+ :return:
+ """
+
+ def loop(timer_event=None):
+ self.update_target_speed(self.__acc_speed, self.__curr_behavior)
+
+ self.new_timer(self.control_loop_rate, loop)
+ self.spin()
+
+
+if __name__ == "__main__":
+ """
+ main function starts the MotionPlanning node
+ :param args:
+ """
+ roscomp.init('MotionPlanning')
+
+ try:
+ node = MotionPlanning()
+ node.run()
+ except KeyboardInterrupt:
+ pass
+ finally:
+ roscomp.shutdown()
diff --git a/code/requirements.txt b/code/requirements.txt
index c15e4287..3b0948d0 100644
--- a/code/requirements.txt
+++ b/code/requirements.txt
@@ -11,3 +11,4 @@ scipy==1.10.0
xmltodict==0.13.0
py-trees==2.1.6
numpy==1.23.5
+ultralytics==8.0.220
\ No newline at end of file
diff --git a/doc/03_research/03_planning/00_paf23/BT_paper.png b/doc/00_assets/planning/BT_paper.png
similarity index 100%
rename from doc/03_research/03_planning/00_paf23/BT_paper.png
rename to doc/00_assets/planning/BT_paper.png
diff --git a/doc/03_research/03_planning/00_paf23/BehaviorTree_medium.png b/doc/00_assets/planning/BehaviorTree_medium.png
similarity index 100%
rename from doc/03_research/03_planning/00_paf23/BehaviorTree_medium.png
rename to doc/00_assets/planning/BehaviorTree_medium.png
diff --git a/doc/03_research/03_planning/00_paf23/Globalplan.png b/doc/00_assets/planning/Globalplan.png
similarity index 100%
rename from doc/03_research/03_planning/00_paf23/Globalplan.png
rename to doc/00_assets/planning/Globalplan.png
diff --git a/doc/03_research/03_planning/00_paf23/Planning.png b/doc/00_assets/planning/Planning.png
similarity index 100%
rename from doc/03_research/03_planning/00_paf23/Planning.png
rename to doc/00_assets/planning/Planning.png
diff --git a/doc/03_research/03_planning/00_paf23/Planning_paf21.png b/doc/00_assets/planning/Planning_paf21.png
similarity index 100%
rename from doc/03_research/03_planning/00_paf23/Planning_paf21.png
rename to doc/00_assets/planning/Planning_paf21.png
diff --git a/doc/03_research/03_planning/00_paf23/intersection_scenario.png b/doc/00_assets/planning/intersection_scenario.png
similarity index 100%
rename from doc/03_research/03_planning/00_paf23/intersection_scenario.png
rename to doc/00_assets/planning/intersection_scenario.png
diff --git a/doc/03_research/03_planning/00_paf23/localplan.png b/doc/00_assets/planning/localplan.png
similarity index 100%
rename from doc/03_research/03_planning/00_paf23/localplan.png
rename to doc/00_assets/planning/localplan.png
diff --git a/doc/03_research/03_planning/00_paf23/overtaking_scenario.png b/doc/00_assets/planning/overtaking_scenario.png
similarity index 100%
rename from doc/03_research/03_planning/00_paf23/overtaking_scenario.png
rename to doc/00_assets/planning/overtaking_scenario.png
diff --git a/doc/03_research/03_planning/00_paf23/overview.jpg b/doc/00_assets/planning/overview.jpg
similarity index 100%
rename from doc/03_research/03_planning/00_paf23/overview.jpg
rename to doc/00_assets/planning/overview.jpg
diff --git a/doc/03_research/03_planning/00_paf23/overview.png b/doc/00_assets/planning/overview.png
similarity index 100%
rename from doc/03_research/03_planning/00_paf23/overview.png
rename to doc/00_assets/planning/overview.png
diff --git a/doc/03_research/03_planning/00_paf23/overview_paper1.png b/doc/00_assets/planning/overview_paper1.png
similarity index 100%
rename from doc/03_research/03_planning/00_paf23/overview_paper1.png
rename to doc/00_assets/planning/overview_paper1.png
diff --git a/doc/03_research/03_planning/00_paf23/prios.png b/doc/00_assets/planning/prios.png
similarity index 100%
rename from doc/03_research/03_planning/00_paf23/prios.png
rename to doc/00_assets/planning/prios.png
diff --git a/doc/03_research/03_planning/00_paf23/01_Planning.md b/doc/03_research/03_planning/00_paf23/01_Planning.md
index da344b00..a36283ac 100644
--- a/doc/03_research/03_planning/00_paf23/01_Planning.md
+++ b/doc/03_research/03_planning/00_paf23/01_Planning.md
@@ -6,7 +6,7 @@ Finding the optimal path from start to goal, taking into account the static and
### [PAF21 - 2](https://github.com/ll7/paf21-2)
-![Planning](Planning_paf21.png)
+![Planning](../../../00_assets/planning/Planning_paf21.png)
Input:
@@ -55,7 +55,7 @@ Map Manager
### [Autoware](https://github.com/autowarefoundation/autoware)
-![https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-architecture/planning/](Planning.png)
+![https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-architecture/planning/](../../../00_assets/planning/Planning.png)
Input:
diff --git a/doc/03_research/03_planning/00_paf23/02_PlanningPaf22.md b/doc/03_research/03_planning/00_paf23/02_PlanningPaf22.md
index a28289f8..605605e7 100644
--- a/doc/03_research/03_planning/00_paf23/02_PlanningPaf22.md
+++ b/doc/03_research/03_planning/00_paf23/02_PlanningPaf22.md
@@ -4,7 +4,7 @@
## Architecture
-![overview](overview.jpg)
+![overview](../../../00_assets/planning/overview.jpg)
### Preplanning
diff --git a/doc/03_research/03_planning/00_paf23/architecture.md b/doc/03_research/03_planning/00_paf23/03_PlannedArchitecture.md
similarity index 91%
rename from doc/03_research/03_planning/00_paf23/architecture.md
rename to doc/03_research/03_planning/00_paf23/03_PlannedArchitecture.md
index e85c7947..172cb9ca 100644
--- a/doc/03_research/03_planning/00_paf23/architecture.md
+++ b/doc/03_research/03_planning/00_paf23/03_PlannedArchitecture.md
@@ -4,7 +4,7 @@ Provide an overview for a possible planning architecture consisting of Global P
## Overview
-![overview](overview.png)
+![overview](../../../00_assets/planning/overview.png)
The **Global Plan** gathers all data relevant to build a copy of the town the car is driving in. It also computes an optimal global path, which includes all waypoints. The Decision Making can order a recalculation of the global path.
@@ -19,7 +19,7 @@ Motions like lane changing must be approved by the decision making and they get
### Global Plan
-![overview](Globalplan.png)
+![overview](../../../00_assets/planning/Globalplan.png)
*Map Generator:* Gathers map data from Carla and prepares it for the PrePlanner
@@ -69,7 +69,7 @@ See Behaviour Tree.
### Local Plan
-![Local Plan](localplan.png)
+![Local Plan](../../../00_assets/planning/localplan.png)
*Local Preplan:* Segements the global path and calculates the middle of the lane. Is not called in every cycle.
@@ -128,4 +128,4 @@ See Behaviour Tree.
Red must have for next Milestone, Orange needed for future milestones, Green can already be used or is not that important
-![prios](prios.png)
+![prios](../../../00_assets/planning/prios.png)
diff --git a/doc/03_research/03_planning/00_paf23/Local_planning_for_first_milestone.md b/doc/03_research/03_planning/00_paf23/04_Local_planning_for_first_milestone.md
similarity index 82%
rename from doc/03_research/03_planning/00_paf23/Local_planning_for_first_milestone.md
rename to doc/03_research/03_planning/00_paf23/04_Local_planning_for_first_milestone.md
index e47f05eb..431ffee9 100644
--- a/doc/03_research/03_planning/00_paf23/Local_planning_for_first_milestone.md
+++ b/doc/03_research/03_planning/00_paf23/04_Local_planning_for_first_milestone.md
@@ -16,7 +16,7 @@ Julius Miller
Paper: [Behavior Planning for Autonomous Driving: Methodologies, Applications, and Future Orientation](https://www.researchgate.net/publication/369181112_Behavior_Planning_for_Autonomous_Driving_Methodologies_Applications_and_Future_Orientation)
-![Overview_interfaces](overview_paper1.png)
+![Overview_interfaces](../../../00_assets/planning/overview_paper1.png)
Rule-based planning
@@ -49,7 +49,7 @@ Leader, Track-Speed
Github: [Decision Making with Behaviour Tree](https://github.com/kirilcvetkov92/Path-planning?source=post_page-----8db1575fec2c--------------------------------)
-![github_tree](BehaviorTree_medium.png)
+![github_tree](../../../00_assets/planning/BehaviorTree_medium.png)
- No Intersection
- Collision Detection in behaviour Tree
@@ -58,7 +58,7 @@ Paper: [Behavior Trees for
decision-making in Autonomous
Driving](https://www.diva-portal.org/smash/get/diva2:907048/FULLTEXT01.pdf)
-![Behaviour Tree](BT_paper.png)
+![Behaviour Tree](../../../00_assets/planning/BT_paper.png)
- simple simulation
- Car only drives straight
@@ -81,17 +81,17 @@ Low Level Decision:
- Emergency Brake
- ACC
-![localplan](localplan.png)
+![localplan](../../../00_assets/planning/localplan.png)
Scenarios:
-![Intersection](intersection_scenario.png)
+![Intersection](../../../00_assets/planning/intersection_scenario.png)
Left: Behaviour Intersection is triggered for motion planning, acc publishes speed. -> Lower speed is used to approach intersection
Right: Behaviour Intersection is used for motion planning, acc is ignored (no object in front)
-![Overtake](overtaking_scenario.png)
+![Overtake](../../../00_assets/planning/overtaking_scenario.png)
Left: Overtake gets triggered to maintain speed, acc is ignored
diff --git a/doc/06_perception/07_vision_node.md b/doc/06_perception/07_vision_node.md
index 1b72ead4..2b9b7bd3 100644
--- a/doc/06_perception/07_vision_node.md
+++ b/doc/06_perception/07_vision_node.md
@@ -1,25 +1,39 @@
# Vision Node
-The Visison Node serves as a replacement for the previous segmentation-node.
-It provides an adaptive interface that is able to perform object-detection or image-segmentation
+The Vision Node provides an adaptive interface that is able to perform object-detection and/or image-segmentation
on several different models. The model can be specified as a parameter in the perception.launch file.
+The VisionNode is currently using the yolov8x-seg model.
+
## Usage
The following code shows how the Vision-Node is specified in perception.launch
`
-
+
-
+
`
@@ -31,19 +45,65 @@ The Vision-Node will automatically switch between object-detection, imagesegment
For now the Vision-Node only supports pyTorch models. Within the next sprint it should be able to
accept other frameworks aswell. It should also be possible to run object-detection and image-segmentation at the same time.
+## Model overview
+
+| Model | Type | Stable | Comments |
+|---------------------------------------|--------------|--------|---------------------------------------|
+| fasterrcnn_resnet50_fpn_v2 | detection | no | CUDA-Problems |
+| fasterrcnn_mobilenet_v3_large_320_fpn | detection | no | CUDA-Problems |
+| yolov8n | detection | yes | |
+| yolov8s | detection | yes | |
+| yolov8m | detection | yes | |
+| yolov8l | detection | yes | |
+| yolov8x | detection | yes | |
+| yolo_nas_l | detection | no | Missing super_gradients package error |
+| yolo_nas_m | detection | no | Missing super_gradients package error |
+| yolo_nas_s | detection | no | Missing super_gradients package error |
+| rtdetr-l | detection | yes | |
+| rtdetr-x | detection | yes | |
+| sam_l | detection | no | Ultralytics Error |
+| FastSAM-x | detection | no | CUDA Problems |
+| deeplabv3_resnet101 | segmentation | no | CUDA Problems, Segmentation Problems |
+| yolov8x-seg | segmentation | yes | |
+
## How it works
### Initialization
The Vision-Node contains a Dictionary with all it's models. Depending on the model parameter it will initialize the correct model and weights.
-`
-self.model_dict = {
- "fasterrcnn_resnet50_fpn_v2": (fasterrcnn_resnet50_fpn_v2(weights=FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT), FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT, "detection", "pyTorch"),
- "fasterrcnn_mobilenet_v3_large_320_fpn": (fasterrcnn_mobilenet_v3_large_320_fpn(weights=FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT), FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT, "detection", "pyTorch"),
- "deeplabv3_resnet101": (deeplabv3_resnet101(weights=DeepLabV3_ResNet101_Weights.DEFAULT), DeepLabV3_ResNet101_Weights.DEFAULT, "segmentation", "pyTorch")
- }
-`
+`self.model_dict = {
+ "fasterrcnn_resnet50_fpn_v2":
+ (fasterrcnn_resnet50_fpn_v2(
+ weights=FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT),
+ FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT,
+ "detection",
+ "pyTorch"),
+ "fasterrcnn_mobilenet_v3_large_320_fpn":
+ (fasterrcnn_mobilenet_v3_large_320_fpn(
+ weights=FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT),
+ FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT,
+ "detection",
+ "pyTorch"),
+ "deeplabv3_resnet101":
+ (deeplabv3_resnet101(
+ weights=DeepLabV3_ResNet101_Weights.DEFAULT),
+ DeepLabV3_ResNet101_Weights.DEFAULT,
+ "segmentation",
+ "pyTorch"),
+ 'yolov8n': (YOLO, "yolov8n.pt", "detection", "ultralytics"),
+ 'yolov8s': (YOLO, "yolov8s.pt", "detection", "ultralytics"),
+ 'yolov8m': (YOLO, "yolov8m.pt", "detection", "ultralytics"),
+ 'yolov8l': (YOLO, "yolov8l.pt", "detection", "ultralytics"),
+ 'yolov8x': (YOLO, "yolov8x.pt", "detection", "ultralytics"),
+ 'yolo_nas_l': (NAS, "yolo_nas_l.pt", "detection", "ultralytics"),
+ 'yolo_nas_m': (NAS, "yolo_nas_m.pt", "detection", "ultralytics"),
+ 'yolo_nas_s': (NAS, "yolo_nas_s.pt", "detection", "ultralytics"),
+ 'rtdetr-l': (RTDETR, "rtdetr-l.pt", "detection", "ultralytics"),
+ 'rtdetr-x': (RTDETR, "rtdetr-x.pt", "detection", "ultralytics"),
+ 'yolov8x-seg': (YOLO, "yolov8x-seg.pt", "segmentation", "ultralytics"),
+ 'sam_l': (SAM, "sam_l.pt", "detection", "ultralytics"),
+ 'FastSAM-x': (FastSAM, "FastSAM-x.pt", "detection", "ultralytics")}`
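+
+Depending on the `framework` entry of the selected model, the node prepares the model differently. Below is a minimal sketch of that dispatch, where `model_dict` and `device` stand in for the node's `self.model_dict` and `self.device`; it illustrates the idea rather than reproducing the exact node code:
+
+```python
+# Sketch: resolve one entry of model_dict and prepare it for inference.
+model, weights, model_type, framework = model_dict["yolov8x-seg"]
+
+if framework == "pyTorch":
+    # pyTorch entries already hold an instantiated model
+    model = model.to(device)
+elif framework == "ultralytics":
+    # ultralytics entries hold the class and a weights file name
+    model = model(weights)  # e.g. YOLO("yolov8x-seg.pt")
+```
+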
### Core
@@ -61,18 +121,21 @@ This function is automatically triggered by the Camera-Subscriber of the Vision-
## Visualization
-The Vision-Node implements an ImagePublisher under the topic: "/paf//Center/segmented_image"
+The Vision-Node implements an ImagePublisher under the topic: "/paf/hero/Center/segmented_image"
+
+The Configuration File of RViz has been changed accordingly to display the published images alongside the camera image.
-The Configuartion File of RViz has been changed accordingly to display the published images alongside with the Camera.
+The built-in visualization of the YOLO models works very well.
## Known Issues
### Time
-First experiments showed that the handle_camera_image function is way to slow to be used reliably. It takes around 1.5 seconds to handle one image.
+When running YOLO models, the timing issue is fixed because ultralytics manages the CUDA resources very well.
-Right now the Vision-Node is not using cuda due to cuda-memory-issues that couldn't be fixed right away.
+When running other models, the CUDA error persists.
-The performance is expected to rise quite a bit when using cuda.
+## Segmentation
-Also their is lots more room for testing different models inside the Vision-Node to evualte their accuracy and time-performance.
+For some reason the segmentation mask creation (create_mask) works in a standalone project, but not in the Vision-Node.
+I stopped debugging because the YOLO models work much better and provide a very good and stable baseline.
diff --git a/doc/06_perception/experiments/model_evaluation/README.md b/doc/06_perception/experiments/object-detection-model_evaluation/README.md
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/README.md
rename to doc/06_perception/experiments/object-detection-model_evaluation/README.md
diff --git a/doc/06_perception/experiments/model_evaluation/asset-copies/1619_PT_fasterrcnn_resnet50_fpn_v2.jpg b/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_PT_fasterrcnn_resnet50_fpn_v2.jpg
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/asset-copies/1619_PT_fasterrcnn_resnet50_fpn_v2.jpg
rename to doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_PT_fasterrcnn_resnet50_fpn_v2.jpg
diff --git a/doc/06_perception/experiments/model_evaluation/asset-copies/1619_TF_faster-rcnn.jpg b/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_TF_faster-rcnn.jpg
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/asset-copies/1619_TF_faster-rcnn.jpg
rename to doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_TF_faster-rcnn.jpg
diff --git a/doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolo_nas_l.jpg b/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_nas_l.jpg
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolo_nas_l.jpg
rename to doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_nas_l.jpg
diff --git a/doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolo_rtdetr_x.jpg b/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_rtdetr_x.jpg
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolo_rtdetr_x.jpg
rename to doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_rtdetr_x.jpg
diff --git a/doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolov8x.jpg b/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x.jpg
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolov8x.jpg
rename to doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x.jpg
diff --git a/doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolov8x_seg.jpg b/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x_seg.jpg
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolov8x_seg.jpg
rename to doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x_seg.jpg
diff --git a/doc/06_perception/experiments/model_evaluation/globals.py b/doc/06_perception/experiments/object-detection-model_evaluation/globals.py
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/globals.py
rename to doc/06_perception/experiments/object-detection-model_evaluation/globals.py
diff --git a/doc/06_perception/experiments/model_evaluation/pt.py b/doc/06_perception/experiments/object-detection-model_evaluation/pt.py
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/pt.py
rename to doc/06_perception/experiments/object-detection-model_evaluation/pt.py
diff --git a/doc/06_perception/experiments/model_evaluation/pylot.py b/doc/06_perception/experiments/object-detection-model_evaluation/pylot.py
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/pylot.py
rename to doc/06_perception/experiments/object-detection-model_evaluation/pylot.py
diff --git a/doc/06_perception/experiments/model_evaluation/requirements.txt b/doc/06_perception/experiments/object-detection-model_evaluation/requirements.txt
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/requirements.txt
rename to doc/06_perception/experiments/object-detection-model_evaluation/requirements.txt
diff --git a/doc/06_perception/experiments/model_evaluation/yolo.py b/doc/06_perception/experiments/object-detection-model_evaluation/yolo.py
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/yolo.py
rename to doc/06_perception/experiments/object-detection-model_evaluation/yolo.py
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/README.md b/doc/06_perception/experiments/traffic-light-detection_evaluation/README.md
new file mode 100644
index 00000000..46e8e0af
--- /dev/null
+++ b/doc/06_perception/experiments/traffic-light-detection_evaluation/README.md
@@ -0,0 +1,43 @@
+# Evaluation of the PAF22 Traffic Light Detection
+
+In this experiment, the existing Traffic Light Detection from PAF22 has been tested.
+The goal was to verify that it is suitable for PAF23.
+
+## Model
+
+The architecture of the model is a Convolutional Neural Network (CNN) consisting of the following layers (a PyTorch sketch of this stack follows the list):
+
+1. **Convolutional Layer 1**: This layer uses a 2D convolution over an input signal composed of several input planes, with in_channels input channels, 4 output channels, a kernel size of 5, and padding set to 'same'. This means the output size is the same as the input size.
+2. **Batch Normalization Layer**: This layer applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.
+3. **Convolutional Layer 2**: This layer is similar to the first convolutional layer but it takes the output of the first layer (4 channels) as input.
+4. **Max Pooling Layer 1**: This layer uses a 2D max pooling over an input signal composed of several input planes, with a kernel size of (2, 2).
+5. **Convolutional Layer 3**: This layer is similar to the previous convolutional layers but it has a kernel size of 3.
+6. **Max Pooling Layer 2**: This layer is similar to the first max pooling layer.
+7. **Convolutional Layer 4**: This layer is similar to the previous convolutional layers.
+8. **Max Pooling Layer 3**: This layer is similar to the previous max pooling layers.
+9. **Flatten Layer**: This layer flattens the input by removing the spatial dimensions.
+10. **Dropout Layer**: This layer randomly zeroes some of the elements of the input tensor with probability p=0.3 using samples from a Bernoulli distribution.
+11. **Linear Layer**: This layer applies a linear transformation to the incoming data. It has 64 input features and num_classes output features.
+
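+A hedged PyTorch sketch of this stack follows. The 3-channel input, the output channel counts of layers 3, 5 and 7, the kernel size of layer 7, and the 32x32 input resolution (which makes the flattened size come out to 64) are assumptions derived from the description above, not verified against the original source:
+
+```python
+import torch
+import torch.nn as nn
+
+
+class TrafficLightCNN(nn.Module):
+    def __init__(self, num_classes: int = 4, in_channels: int = 3):
+        super().__init__()
+        self.layers = nn.Sequential(
+            nn.Conv2d(in_channels, 4, kernel_size=5, padding="same"),  # 1. conv
+            nn.BatchNorm2d(4),                                         # 2. batch norm
+            nn.Conv2d(4, 4, kernel_size=5, padding="same"),            # 3. conv
+            nn.MaxPool2d((2, 2)),                                      # 4. max pool
+            nn.Conv2d(4, 4, kernel_size=3, padding="same"),            # 5. conv
+            nn.MaxPool2d((2, 2)),                                      # 6. max pool
+            nn.Conv2d(4, 4, kernel_size=3, padding="same"),            # 7. conv
+            nn.MaxPool2d((2, 2)),                                      # 8. max pool
+            nn.Flatten(),                                              # 9. flatten
+            nn.Dropout(p=0.3),                                         # 10. dropout
+            nn.Linear(64, num_classes),                                # 11. linear
+        )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        # 64 flattened features = 4 channels * 4 * 4, i.e. a 32x32 input
+        # after three 2x2 poolings (assumed).
+        return self.layers(x)
+
+
+# usage: logits for a batch of (assumed) 32x32 RGB crops
+logits = TrafficLightCNN(num_classes=4)(torch.randn(1, 3, 32, 32))
+```
+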
+## Dataset
+
+The existing dataset of PAF22 consists of 2340 images (combined) of the categories red, yellow, green, and backside. There are also 382 validation images (combined).
+
+The data can be accessed through DVC.
+
+## Training
+
+Running the training with `dvc exp run` in the traffic light detection directory results in a trained model with >99% training and validation accuracy.
+
+## Examples
+
+| Result | Large | Small |
+|--------|-------|-------|
+| Green  | ![Green-Large](assets/green_4.png) | ![Green-Small](assets/green_22.jpg) |
+| Yellow | ![Yellow-Large](assets/yellow_1.png) | ![Yellow-Small](assets/yellow_18.jpg) |
+| Red    | ![Red-Large](assets/red_10.png) | ![Red-Small](assets/red_20.png) |
+| Back   | ![Back-Large](assets/back_1.png) | ![Back-Small](assets/back_14.jpg) |
+
+## Verdict
+
+The high accuracy and manual testing of the above example images verified that the existing PAF22 traffic light detection model can be used for PAF23.
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_1.png b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_1.png
new file mode 100644
index 00000000..b0a6a2f6
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_1.png differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_14.jpg b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_14.jpg
new file mode 100644
index 00000000..8fa865b1
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_14.jpg differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_22.jpg b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_22.jpg
new file mode 100644
index 00000000..5df96d68
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_22.jpg differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_4.png b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_4.png
new file mode 100644
index 00000000..a65fccf8
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_4.png differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_10.png b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_10.png
new file mode 100644
index 00000000..c7192ae2
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_10.png differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_20.png b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_20.png
new file mode 100644
index 00000000..365e7529
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_20.png differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_1.png b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_1.png
new file mode 100644
index 00000000..b39e5182
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_1.png differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_18.jpg b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_18.jpg
new file mode 100644
index 00000000..12881d31
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_18.jpg differ
diff --git a/doc/07_planning/motion_planning.md b/doc/07_planning/motion_planning.md
new file mode 100644
index 00000000..175fbb25
--- /dev/null
+++ b/doc/07_planning/motion_planning.md
@@ -0,0 +1,56 @@
+# Motion Planning
+
+**Summary:** [motion_planning.py](.../code/planning/local_planner/src/motion_planning.py):
+The motion planning is responsible for collecting all the speeds from the different components and choosing the optimal one to be forwarded to the acting.
+
+---
+
+## Author
+
+Julius Miller
+
+## Date
+
+17.12.2023
+
+## Prerequisite
+
+---
+
+- [Motion Planning](#motion-planning)
+ - [Author](#author)
+ - [Date](#date)
+ - [Prerequisite](#prerequisite)
+ - [Description](#description)
+ - [Inputs](#inputs)
+ - [Outputs](#outputs)
+
+
+---
+
+## Description
+
+This node currently gathers the behavior speed and the acc_speed and chooses the lower one to be forwarded to the acting.
+At the moment this is enough since the only present behaviors are Intersection, Lane Change and Cruise.
+
+Once the Overtaking behavior is added, choosing the minimum speed will no longer be sufficient.
+
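+The selection itself is deliberately simple; a stripped-down sketch of the core logic (mirroring `update_target_speed` in motion_planning.py, with publisher setup omitted):
+
+```python
+def update_target_speed(self, acc_speed: float, behavior: str) -> None:
+    # speed suggested by the current behavior (intersection, lane change, cruise)
+    be_speed = self.get_speed_by_behavior(behavior)
+    # the lower of behavior speed and ACC speed is forwarded to the acting
+    self.target_speed = min(be_speed, acc_speed)
+    self.velocity_pub.publish(self.target_speed)
+```
+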
+### Inputs
+
+This node subscribes to the following topics:
+
+- Current Behavior:
+ - `/paf/{role_name}/curr_behavior` (String)
+- ACC Velocity:
+ - `/paf/{role_name}/acc_velocity` (Float32)
+- Waypoint:
+ - `/paf/{role_name}/waypoint_distance` (Waypoint)
+- Lane Change:
+ - `/paf/{role_name}/lane_change_distance` (LaneChange)
+
+### Outputs
+
+This node publishes the following topics:
+
+- Target Velocity
+ - `/paf/{role_name}/target_velocity` (Float32)