diff --git a/build/docker-compose.yml b/build/docker-compose.yml
index a68e97c4..1545f945 100644
--- a/build/docker-compose.yml
+++ b/build/docker-compose.yml
@@ -57,8 +57,8 @@ services:
tty: true
shm_size: 2gb
#command: bash -c "sleep 10 && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=/opt/leaderboard/data/routes_devtest.xml --agent=/opt/leaderboard/leaderboard/autoagents/npc_agent.py --host=carla-simulator --track=SENSORS"
- #command: bash -c "sleep 10 && roslaunch agent/launch/dev.launch"
- command: bash -c "sleep 10 && sudo chown -R carla:carla ../code/ && sudo chmod -R a+w ../code/ && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=/opt/leaderboard/data/routes_devtest.xml --agent=/workspace/code/agent/src/agent/agent.py --host=carla-simulator --track=MAP"
+ command: bash -c "sleep 10 && roslaunch agent/launch/dev.launch"
+ #command: bash -c "sleep 10 && sudo chown -R carla:carla ../code/ && sudo chmod -R a+w ../code/ && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=/opt/leaderboard/data/routes_devtest.xml --agent=/workspace/code/agent/src/agent/agent.py --host=carla-simulator --track=MAP"
logging:
driver: "local"
environment:
diff --git a/code/agent/config/rviz_config.rviz b/code/agent/config/rviz_config.rviz
index 7064cc88..5c10eca9 100644
--- a/code/agent/config/rviz_config.rviz
+++ b/code/agent/config/rviz_config.rviz
@@ -63,11 +63,11 @@ Visualization Manager:
Unreliable: false
Value: true
Visibility:
- Grid: true
- Imu: true
- Path: true
- PointCloud2: true
- Value: true
+ Grid: false
+ Imu: false
+ Path: false
+ PointCloud2: false
+ Value: false
Zoom Factor: 1
- Class: rviz/Image
Enabled: true
@@ -327,4 +327,4 @@ Window Geometry:
collapsed: false
Width: 2488
X: 1992
- Y: 27
+ Y: 27
\ No newline at end of file
diff --git a/code/perception/launch/perception.launch b/code/perception/launch/perception.launch
index 0a24ba5e..3adc596e 100644
--- a/code/perception/launch/perception.launch
+++ b/code/perception/launch/perception.launch
@@ -33,15 +33,30 @@
-
-
+ - deeplabv3_resnet101
+ - yolov8x-seg
+ -->
+
+
diff --git a/code/perception/src/traffic_light_detection/dataset.dvc b/code/perception/src/traffic_light_detection/dataset.dvc
new file mode 100644
index 00000000..73aa6bd3
--- /dev/null
+++ b/code/perception/src/traffic_light_detection/dataset.dvc
@@ -0,0 +1,6 @@
+outs:
+- md5: 3a559397ebc58c1ecf142dea18d03367.dir
+ size: 13745063
+ nfiles: 2723
+ hash: md5
+ path: dataset
diff --git a/code/perception/src/traffic_light_detection/dvc.lock b/code/perception/src/traffic_light_detection/dvc.lock
new file mode 100644
index 00000000..d9f625ce
--- /dev/null
+++ b/code/perception/src/traffic_light_detection/dvc.lock
@@ -0,0 +1,34 @@
+schema: '2.0'
+stages:
+ train:
+ cmd: python src/traffic_light_detection/traffic_light_training.py
+ deps:
+ - path: dataset
+ md5: 3a559397ebc58c1ecf142dea18d03367.dir
+ size: 13745063
+ nfiles: 2723
+ - path: src
+ hash: md5
+ md5: b6c9cb867c89ad6e86403d9c33538136.dir
+ size: 23777
+ nfiles: 10
+ params:
+ params.yaml:
+ train:
+ epochs: 100
+ batch_size: 32
+ outs:
+ - path: dvclive/metrics.json
+ hash: md5
+ md5: af33de699558fbfd3edee1607ba88f81
+ size: 218
+ - path: dvclive/plots
+ hash: md5
+ md5: 774919de9e9d6820ac6821d0819829c1.dir
+ size: 8900
+ nfiles: 4
+ - path: models
+ hash: md5
+ md5: ee67bac2f189d2cc5a199d91ba3295ac.dir
+ size: 10815
+ nfiles: 1
diff --git a/code/perception/src/traffic_light_detection/dvc.yaml b/code/perception/src/traffic_light_detection/dvc.yaml
new file mode 100644
index 00000000..d08afa7e
--- /dev/null
+++ b/code/perception/src/traffic_light_detection/dvc.yaml
@@ -0,0 +1,21 @@
+stages:
+ train:
+ cmd: python src/traffic_light_detection/traffic_light_training.py
+ deps:
+ - dataset
+ - src
+ params:
+ - params.yaml:
+ outs:
+ - models
+ metrics:
+ - dvclive/metrics.json:
+ cache: false
+ plots:
+ - dvclive/plots:
+ cache: false
+metrics:
+- dvclive/metrics.json
+plots:
+- dvclive/plots/metrics:
+ x: step
diff --git a/code/perception/src/traffic_light_detection/src/data_generation/transforms.py b/code/perception/src/traffic_light_detection/src/data_generation/transforms.py
index f1f992d2..41dc8bfc 100644
--- a/code/perception/src/traffic_light_detection/src/data_generation/transforms.py
+++ b/code/perception/src/traffic_light_detection/src/data_generation/transforms.py
@@ -28,7 +28,7 @@ def __call__(self, image):
image = torchvision.transforms.Pad((0, pad))(image)
else:
image = torchvision.transforms.Pad((pad, 0))(image)
- image = torchvision.transforms.Resize(self.size)(image)
+ image = torchvision.transforms.Resize(self.size, antialias=True)(image)
return image
diff --git a/code/perception/src/traffic_light_detection/src/traffic_light_detection/classification_model.py b/code/perception/src/traffic_light_detection/src/traffic_light_detection/classification_model.py
index 9c5fb339..f44d2c31 100644
--- a/code/perception/src/traffic_light_detection/src/traffic_light_detection/classification_model.py
+++ b/code/perception/src/traffic_light_detection/src/traffic_light_detection/classification_model.py
@@ -59,10 +59,10 @@ def load_model(cfg):
if path is not None:
try:
state_dict = torch.load(path)
- model.load_state_dict(state_dict).eval()
+ model.load_state_dict(state_dict)
print(f"Pretrained model loaded from {path}")
return model
- except (Exception, ):
- print(f"No pretrained model found at {path}. "
+ except Exception as e:
+ print(f"No pretrained model found at {path}: {e}\n"
f"Created new model with random weights.")
return model.eval()
diff --git a/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_inference.py b/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_inference.py
index 40dd4c24..4631660b 100644
--- a/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_inference.py
+++ b/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_inference.py
@@ -1,5 +1,4 @@
import argparse
-from pathlib import Path
import torch.cuda
import torchvision.transforms as t
@@ -24,6 +23,9 @@ def parse_args():
'model_acc_99.53_val_100.0.pt',
help='path to pretrained model',
type=str)
+ parser.add_argument('--image', default=None,
+ help='/dataset/val/green/green_83.png',
+ type=str)
return parser.parse_args()
@@ -66,8 +68,7 @@ def __call__(self, img):
# main function for testing purposes
if __name__ == '__main__':
args = parse_args()
- image_path = str(Path(__file__).resolve().parents[2].resolve())
- image_path += "/dataset/val/green/green_83.png"
+ image_path = args.image
image = load_image(image_path)
classifier = TrafficLightInference(args.model)
pred = classifier(image)
diff --git a/code/perception/src/vision_node.py b/code/perception/src/vision_node.py
index 726ea4de..d736253a 100755
--- a/code/perception/src/vision_node.py
+++ b/code/perception/src/vision_node.py
@@ -19,6 +19,7 @@
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
import numpy as np
from time import perf_counter
+from ultralytics import NAS, YOLO, RTDETR, SAM, FastSAM
"""
VisionNode:
@@ -55,17 +56,32 @@ def __init__(self, name, **kwargs):
weights=DeepLabV3_ResNet101_Weights.DEFAULT),
DeepLabV3_ResNet101_Weights.DEFAULT,
"segmentation",
- "pyTorch")
+ "pyTorch"),
+ 'yolov8n': (YOLO, "yolov8n.pt", "detection", "ultralytics"),
+ 'yolov8s': (YOLO, "yolov8s.pt", "detection", "ultralytics"),
+ 'yolov8m': (YOLO, "yolov8m.pt", "detection", "ultralytics"),
+ 'yolov8l': (YOLO, "yolov8l.pt", "detection", "ultralytics"),
+ 'yolov8x': (YOLO, "yolov8x.pt", "detection", "ultralytics"),
+ 'yolo_nas_l': (NAS, "yolo_nas_l.pt", "detection", "ultralytics"),
+ 'yolo_nas_m': (NAS, "yolo_nas_m.pt", "detection", "ultralytics"),
+ 'yolo_nas_s': (NAS, "yolo_nas_s.pt", "detection", "ultralytics"),
+ 'rtdetr-l': (RTDETR, "rtdetr-l.pt", "detection", "ultralytics"),
+ 'rtdetr-x': (RTDETR, "rtdetr-x.pt", "detection", "ultralytics"),
+ 'yolov8x-seg': (YOLO, "yolov8x-seg.pt", "segmentation",
+ "ultralytics"),
+ 'sam_l': (SAM, "sam_l.pt", "detection", "ultralytics"),
+ 'FastSAM-x': (FastSAM, "FastSAM-x.pt", "detection", "ultralytics"),
+
}
+ print(torch.__version__)
+
# general setup
self.bridge = CvBridge()
self.role_name = self.get_param("role_name", "hero")
self.side = self.get_param("side", "Center")
- # self.device = torch.device("cuda"
- # if torch.cuda.is_available() else "cpu") Cuda Memory Issues
- self.device = torch.device("cpu")
- print("VisionNode working on: ", self.device)
+ self.device = torch.device("cuda"
+ if torch.cuda.is_available() else "cpu")
# publish / subscribe setup
self.setup_camera_subscriptions()
@@ -80,9 +96,22 @@ def __init__(self, name, **kwargs):
self.type = model_info[2]
self.framework = model_info[3]
print("Vision Node Configuration:")
+ print("Device -> ", self.device)
print(f"Model -> {self.get_param('model')},")
print(f"Type -> {self.type}, Framework -> {self.framework}")
- self.model.to(self.device)
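+        # limit this process to 10% of the GPU memory to work around the CUDA memory issues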
+ torch.cuda.memory.set_per_process_memory_fraction(0.1)
+
+ # pyTorch and CUDA setup
+ if self.framework == "pyTorch":
+ for param in self.model.parameters():
+ param.requires_grad = False
+ self.model.to(self.device)
+
+ # ultralytics setup
+ if self.framework == "ultralytics":
+ self.model = self.model(self.weights)
+
+ # tensorflow setup
def setup_camera_subscriptions(self):
self.new_subscription(
@@ -101,6 +130,30 @@ def setup_camera_publishers(self):
def handle_camera_image(self, image):
startTime = perf_counter()
+
+ # free up cuda memory
+ if self.device == "cuda":
+ torch.cuda.empty_cache()
+
+ print("Before Model: ", perf_counter() - startTime)
+
+ if self.framework == "pyTorch":
+ vision_result = self.predict_torch(image)
+
+ if self.framework == "ultralytics":
+ vision_result = self.predict_ultralytics(image)
+
+ print("After Model: ", perf_counter() - startTime)
+
+ # publish image to rviz
+ img_msg = self.bridge.cv2_to_imgmsg(vision_result,
+ encoding="passthrough")
+ img_msg.header = image.header
+ self.publisher.publish(img_msg)
+
+ pass
+
+ def predict_torch(self, image):
self.model.eval()
cv_image = self.bridge.imgmsg_to_cv2(img_msg=image,
desired_encoding='passthrough')
@@ -114,39 +167,41 @@ def handle_camera_image(self, image):
input_image = preprocess(cv_image).unsqueeze(dim=0)
input_image = input_image.to(self.device)
- print("Before Model: ", perf_counter() - startTime)
prediction = self.model(input_image)
- print("After Model: ", perf_counter() - startTime)
+
if (self.type == "detection"):
vision_result = self.apply_bounding_boxes(cv_image, prediction[0])
if (self.type == "segmentation"):
vision_result = self.create_mask(cv_image, prediction['out'])
- img_msg = self.bridge.cv2_to_imgmsg(vision_result,
- encoding="passthrough")
- img_msg.header = image.header
+ return vision_result
- self.publisher.publish(img_msg)
- print("After Publish: ", perf_counter() - startTime)
+ def predict_ultralytics(self, image):
+ cv_image = self.bridge.imgmsg_to_cv2(img_msg=image,
+ desired_encoding='passthrough')
+ cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
+ print(cv_image.shape)
- pass
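+        # run the ultralytics model on the BGR image; Results.plot() returns the annotated frame as a numpy array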
+ output = self.model(cv_image)
+
+ return output[0].plot()
def create_mask(self, input_image, model_output):
output_predictions = torch.argmax(model_output, dim=0)
-
for i in range(21):
output_predictions[i] = output_predictions[i] == i
output_predictions = output_predictions.to(dtype=torch.bool)
- input_image = t.ToTensor()(input_image)
- input_image = input_image.to(dtype=torch.uint8)
print(output_predictions.shape)
- print(input_image.shape)
- segmented_image = draw_segmentation_masks(input_image,
- output_predictions)
- cv_segmented = cv2.cvtColor(segmented_image.detach().numpy(),
- cv2.COLOR_BGR2RGB)
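+        # convert the HWC numpy image into a CHW uint8 tensor, as expected by draw_segmentation_masks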
+ transposed_image = np.transpose(input_image, (2, 0, 1))
+ tensor_image = torch.tensor(transposed_image)
+ tensor_image = tensor_image.to(dtype=torch.uint8)
+ segmented_image = draw_segmentation_masks(tensor_image,
+ output_predictions,
+ alpha=0.6)
+ cv_segmented = segmented_image.detach().cpu().numpy()
+ cv_segmented = np.transpose(cv_segmented, (1, 2, 0))
return cv_segmented
def apply_bounding_boxes(self, input_image, model_output):
diff --git a/code/planning/local_planner/CMakeLists.txt b/code/planning/local_planner/CMakeLists.txt
new file mode 100644
index 00000000..32a5947b
--- /dev/null
+++ b/code/planning/local_planner/CMakeLists.txt
@@ -0,0 +1,202 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(local_planner)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED)
+
+## System dependencies are found with CMake's conventions
+# find_package(Boost REQUIRED COMPONENTS system)
+
+
+## Uncomment this if the package has a setup.py. This macro ensures
+## modules and global scripts declared therein get installed
+## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
+# catkin_python_setup()
+
+################################################
+## Declare ROS messages, services and actions ##
+################################################
+
+## To declare and build messages, services or actions from within this
+## package, follow these steps:
+## * Let MSG_DEP_SET be the set of packages whose message types you use in
+## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
+## * In the file package.xml:
+## * add a build_depend tag for "message_generation"
+## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
+## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
+## but can be declared for certainty nonetheless:
+## * add a exec_depend tag for "message_runtime"
+## * In this file (CMakeLists.txt):
+## * add "message_generation" and every package in MSG_DEP_SET to
+## find_package(catkin REQUIRED COMPONENTS ...)
+## * add "message_runtime" and every package in MSG_DEP_SET to
+## catkin_package(CATKIN_DEPENDS ...)
+## * uncomment the add_*_files sections below as needed
+## and list every .msg/.srv/.action file to be processed
+## * uncomment the generate_messages entry below
+## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
+
+## Generate messages in the 'msg' folder
+# add_message_files(
+# FILES
+# Message1.msg
+# Message2.msg
+# )
+
+## Generate services in the 'srv' folder
+# add_service_files(
+# FILES
+# Service1.srv
+# Service2.srv
+# )
+
+## Generate actions in the 'action' folder
+# add_action_files(
+# FILES
+# Action1.action
+# Action2.action
+# )
+
+## Generate added messages and services with any dependencies listed here
+# generate_messages(
+# DEPENDENCIES
+# std_msgs # Or other packages containing msgs
+# )
+
+################################################
+## Declare ROS dynamic reconfigure parameters ##
+################################################
+
+## To declare and build dynamic reconfigure parameters within this
+## package, follow these steps:
+## * In the file package.xml:
+## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
+## * In this file (CMakeLists.txt):
+## * add "dynamic_reconfigure" to
+## find_package(catkin REQUIRED COMPONENTS ...)
+## * uncomment the "generate_dynamic_reconfigure_options" section below
+## and list every .cfg file to be processed
+
+## Generate dynamic reconfigure parameters in the 'cfg' folder
+# generate_dynamic_reconfigure_options(
+# cfg/DynReconf1.cfg
+# cfg/DynReconf2.cfg
+# )
+
+###################################
+## catkin specific configuration ##
+###################################
+## The catkin_package macro generates cmake config files for your package
+## Declare things to be passed to dependent projects
+## INCLUDE_DIRS: uncomment this if your package contains header files
+## LIBRARIES: libraries you create in this project that dependent projects also need
+## CATKIN_DEPENDS: catkin_packages dependent projects also need
+## DEPENDS: system dependencies of this project that dependent projects also need
+catkin_package(
+# INCLUDE_DIRS include
+# LIBRARIES planning
+# CATKIN_DEPENDS other_catkin_pkg
+# DEPENDS system_lib
+)
+
+###########
+## Build ##
+###########
+
+## Specify additional locations of header files
+## Your package locations should be listed before other locations
+include_directories(
+# include
+# ${catkin_INCLUDE_DIRS}
+)
+
+## Declare a C++ library
+# add_library(${PROJECT_NAME}
+# src/${PROJECT_NAME}/planning.cpp
+# )
+
+## Add cmake target dependencies of the library
+## as an example, code may need to be generated before libraries
+## either from message generation or dynamic reconfigure
+# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Declare a C++ executable
+## With catkin_make all packages are built within a single CMake context
+## The recommended prefix ensures that target names across packages don't collide
+# add_executable(${PROJECT_NAME}_node src/planning_node.cpp)
+
+## Rename C++ executable without prefix
+## The above recommended prefix causes long target names, the following renames the
+## target back to the shorter version for ease of user use
+## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
+# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
+
+## Add cmake target dependencies of the executable
+## same as for the library above
+# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Specify libraries to link a library or executable target against
+# target_link_libraries(${PROJECT_NAME}_node
+# ${catkin_LIBRARIES}
+# )
+
+#############
+## Install ##
+#############
+
+# all install targets should use catkin DESTINATION variables
+# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
+
+## Mark executable scripts (Python etc.) for installation
+## in contrast to setup.py, you can choose the destination
+# catkin_install_python(PROGRAMS
+# scripts/my_python_script
+# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark executables for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
+# install(TARGETS ${PROJECT_NAME}_node
+# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark libraries for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
+# install(TARGETS ${PROJECT_NAME}
+# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+# )
+
+## Mark cpp header files for installation
+# install(DIRECTORY include/${PROJECT_NAME}/
+# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
+# FILES_MATCHING PATTERN "*.h"
+# PATTERN ".svn" EXCLUDE
+# )
+
+## Mark other files for installation (e.g. launch and bag files, etc.)
+# install(FILES
+# # myfile1
+# # myfile2
+# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+# )
+
+#############
+## Testing ##
+#############
+
+## Add gtest based cpp test target and link libraries
+# catkin_add_gtest(${PROJECT_NAME}-test test/test_planning.cpp)
+# if(TARGET ${PROJECT_NAME}-test)
+# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
+# endif()
+
+## Add folders to be run by python nosetests
+# catkin_add_nosetests(test)
diff --git a/code/planning/local_planner/__init__.py b/code/planning/local_planner/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/code/planning/local_planner/launch/local_planner.launch b/code/planning/local_planner/launch/local_planner.launch
new file mode 100644
index 00000000..c469c05e
--- /dev/null
+++ b/code/planning/local_planner/launch/local_planner.launch
@@ -0,0 +1,6 @@
+
+
+
+
+
+
diff --git a/code/planning/local_planner/package.xml b/code/planning/local_planner/package.xml
new file mode 100644
index 00000000..6dc4269a
--- /dev/null
+++ b/code/planning/local_planner/package.xml
@@ -0,0 +1,59 @@
+
+
+ local_planner
+ 0.0.0
+ The local planning package
+
+
+
+
+ carla
+
+
+
+
+
+ TODO
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ catkin
+
+
+
+
+
+
+
+
diff --git a/code/planning/local_planner/setup.py b/code/planning/local_planner/setup.py
new file mode 100644
index 00000000..e5a88c1c
--- /dev/null
+++ b/code/planning/local_planner/setup.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+from distutils.core import setup
+from catkin_pkg.python_setup import generate_distutils_setup
+
+setup_args = generate_distutils_setup(packages=['local_planner'],
+ package_dir={'': 'src'})
+setup(**setup_args)
diff --git a/code/planning/local_planner/src/collision_check.py b/code/planning/local_planner/src/collision_check.py
new file mode 100755
index 00000000..34bcdbb4
--- /dev/null
+++ b/code/planning/local_planner/src/collision_check.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+# import rospy
+# import tf.transformations
+import ros_compatibility as roscomp
+from ros_compatibility.node import CompatibleNode
+
+# from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
+# from carla_msgs.msg import CarlaRoute # , CarlaWorldInfo
+# from nav_msgs.msg import Path
+# from std_msgs.msg import String
+# from std_msgs.msg import Float32MultiArray
+
+
+class CollisionCheck(CompatibleNode):
+ """
+ This is currently a test node. In the future this node will be
+ responsible for detecting collisions and reporting them.
+ """
+
+ def __init__(self):
+ super(CollisionCheck, self).__init__('CollisionCheck')
+ self.role_name = self.get_param("role_name", "hero")
+ self.control_loop_rate = self.get_param("control_loop_rate", 1)
+        self.current_speed = 50 / 3.6  # m/s
+ # TODO: Add Subscriber for Speed and Obstacles
+ self.logdebug("CollisionCheck started")
+
+ def update(self, speed):
+ self.current_speed = speed
+
+ def time_to_collision(self, obstacle_speed, distance):
+ return distance / (self.current_speed - obstacle_speed)
+
+ def meters_to_collision(self, obstacle_speed, distance):
+ return self.time_to_collision(obstacle_speed, distance) * \
+ self.current_speed
+
+ # PAF 22
+ def calculate_safe_dist(self) -> float:
+ """
+ Calculates the distance you have to keep to the vehicle in front to
+ have t_reaction to react to the vehicle suddenly stopping
+ The formula replicates official recommendations for safe distances
+ """
+ t_reaction = 1 # s
+ t_breaking = 1 # s
+ a = 8 # m/s^2
+ v = self.current_speed
+ s = - 0.5 * a * t_breaking ** 2 + v * t_breaking + v * t_reaction
+ return s + 5
+
+    def calculate_rule_of_thumb(self, emergency):
+        # rule of thumb: reaction distance ~ v * 1 s,
+        # braking distance ~ (v[km/h] / 10)^2 = (v[m/s] * 0.36)^2
+        reaction_distance = self.current_speed
+        braking_distance = (self.current_speed * 0.36)**2
+        if emergency:
+            # emergency braking roughly halves the braking distance
+            return reaction_distance + braking_distance / 2
+        else:
+            return reaction_distance + braking_distance
+
+ def check_crash(self, obstacle):
+ distance, obstacle_speed = obstacle
+
+ collision_time = self.time_to_collision(obstacle_speed, distance)
+ collision_meter = self.meters_to_collision(obstacle_speed, distance)
+
+ safe_distance = self.calculate_safe_dist()
+ safe_distance2 = self.calculate_rule_of_thumb(False)
+ emergency_distance2 = self.calculate_rule_of_thumb(True)
+
+ # TODO: Convert to Publishers
+ if collision_time > 0:
+ if distance < emergency_distance2:
+ print(f"Emergency Brake needed, {emergency_distance2:.2f}")
+ print(f"Ego reaches obstacle after {collision_time:.2f} seconds.")
+ print(f"Ego reaches obstacle after {collision_meter:.2f} meters.")
+ print(f"Safe Distance PAF 22: {safe_distance:.2f}")
+ print(f"Safe Distance Thumb: {safe_distance2:.2f}")
+ else:
+ print("Ego slower then car in front")
+
+ def run(self):
+ """
+ Control loop
+ :return:
+ """
+
+ def loop(timer_event=None):
+ pass
+
+ self.new_timer(self.control_loop_rate, loop)
+ self.spin()
+
+
+if __name__ == "__main__":
+ """
+ main function starts the CollisionCheck node
+ :param args:
+ """
+ roscomp.init('CollisionCheck')
+
+ try:
+ node = CollisionCheck()
+ node.run()
+ except KeyboardInterrupt:
+ pass
+ finally:
+ roscomp.shutdown()
diff --git a/code/planning/planning_runner/launch/planning_runner.launch b/code/planning/planning_runner/launch/planning_runner.launch
index 30f01411..b5597b71 100755
--- a/code/planning/planning_runner/launch/planning_runner.launch
+++ b/code/planning/planning_runner/launch/planning_runner.launch
@@ -2,4 +2,6 @@
+
+
diff --git a/code/requirements.txt b/code/requirements.txt
index c15e4287..3b0948d0 100644
--- a/code/requirements.txt
+++ b/code/requirements.txt
@@ -11,3 +11,4 @@ scipy==1.10.0
xmltodict==0.13.0
py-trees==2.1.6
numpy==1.23.5
+ultralytics==8.0.220
\ No newline at end of file
diff --git a/doc/03_research/03_planning/00_paf23/BT_paper.png b/doc/03_research/03_planning/00_paf23/BT_paper.png
new file mode 100644
index 00000000..0a711e41
Binary files /dev/null and b/doc/03_research/03_planning/00_paf23/BT_paper.png differ
diff --git a/doc/03_research/03_planning/00_paf23/BehaviorTree_medium.png b/doc/03_research/03_planning/00_paf23/BehaviorTree_medium.png
new file mode 100644
index 00000000..edc11092
Binary files /dev/null and b/doc/03_research/03_planning/00_paf23/BehaviorTree_medium.png differ
diff --git a/doc/03_research/03_planning/00_paf23/Local_planning_for_first_milestone.md b/doc/03_research/03_planning/00_paf23/Local_planning_for_first_milestone.md
new file mode 100644
index 00000000..e47f05eb
--- /dev/null
+++ b/doc/03_research/03_planning/00_paf23/Local_planning_for_first_milestone.md
@@ -0,0 +1,104 @@
+# Local Planning for first milestone
+
+**Summary:** This document describes the implementation plan for local planning.
+
+---
+
+## Author
+
+Julius Miller
+
+## Date
+
+03.12.2023
+
+## Research
+
+Paper: [Behavior Planning for Autonomous Driving: Methodologies, Applications, and Future Orientation](https://www.researchgate.net/publication/369181112_Behavior_Planning_for_Autonomous_Driving_Methodologies_Applications_and_Future_Orientation)
+
+![Overview_interfaces](overview_paper1.png)
+
+Rule-based planning
+
+Advantages:
+
+- Simple implementation.
+- Low computational power.
+- Real-time operation.
+- Adapt the rationality of human thinking.
+- Its behavior can be easily traced and explained.
+
+Disadvantages:
+
+- Inability to handle complex environments.
+- Risk of rules explosion.
+- Inability to handle uncertainty.
+- Low ability to handle unplanned situations.
+
+Paper: [A Rule-Based Behaviour Planner for Autonomous Driving, pp. 263-279](https://link.springer.com/chapter/10.1007/978-3-031-21541-4_17)
+
+- Two-layer rule-based theory
+- Behaviours: Emergency-Stop, Stop, Yield, Decelerate-To-Halt, Pass-Obstacle, Follow-Leader, Track-Speed
+
+Github: [Decision Making with Behaviour Tree](https://github.com/kirilcvetkov92/Path-planning?source=post_page-----8db1575fec2c--------------------------------)
+
+![github_tree](BehaviorTree_medium.png)
+
+- No Intersection
+- Collision Detection in behaviour Tree
+
+Paper: [Behavior Trees for decision-making in Autonomous Driving](https://www.diva-portal.org/smash/get/diva2:907048/FULLTEXT01.pdf)
+
+![Behaviour Tree](BT_paper.png)
+
+- simple simulation
+- Car only drives straight
+
+## New Architecture for first milestone
+
+- Keeping it simple
+- Iterative Progress
+- Divide decisions into high level and low level to keep behaviour tree small.
+
+High Level Decisions:
+
+- Intersection
+- Lane Change
+- Cruise (NoOp)
+- (Overtake - limit for multilane)
+
+Low Level Decision:
+
+- Emergency Brake
+- ACC
+
+![localplan](localplan.png)
+
+Scenarios:
+
+![Intersection](intersection_scenario.png)
+
+Left: The Intersection behaviour is triggered for motion planning while the ACC publishes a speed; the lower speed is used to approach the intersection.
+
+Right: The Intersection behaviour is used for motion planning; the ACC is ignored (no object in front).
+
+![Overtake](overtaking_scenario.png)
+
+Left: Overtake is triggered to maintain speed; the ACC is ignored.
+
+Right: Overtaking is not possible; the ACC reduces speed to avoid a collision.
+
+What needs to be done:
+
+- Implement ACC (see the sketch below)
+- Implement motion planning
+- Change publishers in behaviours (only publish name of task)
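+
+As a starting point for the low-level layer, a minimal sketch of how the ACC could derive a target speed from the rule-of-thumb safe distance already prototyped in `collision_check.py` (the function names here are placeholders; speeds in m/s, distances in m):
+
+```python
+def rule_of_thumb_distance(speed: float, emergency: bool = False) -> float:
+    """Safe distance as in collision_check.py: reaction distance + braking distance."""
+    reaction_distance = speed                # distance covered during ~1 s reaction time
+    braking_distance = (speed * 0.36) ** 2   # (v[km/h] / 10)^2 rule of thumb
+    if emergency:
+        return reaction_distance + braking_distance / 2
+    return reaction_distance + braking_distance
+
+
+def acc_target_speed(own_speed: float, lead_speed: float, gap: float) -> float:
+    """Drop to the lead vehicle's speed once the gap falls below the safe distance."""
+    if gap < rule_of_thumb_distance(own_speed):
+        return min(own_speed, lead_speed)
+    return own_speed
+```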
diff --git a/doc/03_research/03_planning/00_paf23/intersection_scenario.png b/doc/03_research/03_planning/00_paf23/intersection_scenario.png
new file mode 100644
index 00000000..a1250eeb
Binary files /dev/null and b/doc/03_research/03_planning/00_paf23/intersection_scenario.png differ
diff --git a/doc/03_research/03_planning/00_paf23/localplan.png b/doc/03_research/03_planning/00_paf23/localplan.png
index 14424349..3f288dfe 100644
Binary files a/doc/03_research/03_planning/00_paf23/localplan.png and b/doc/03_research/03_planning/00_paf23/localplan.png differ
diff --git a/doc/03_research/03_planning/00_paf23/overtaking_scenario.png b/doc/03_research/03_planning/00_paf23/overtaking_scenario.png
new file mode 100644
index 00000000..8707de3e
Binary files /dev/null and b/doc/03_research/03_planning/00_paf23/overtaking_scenario.png differ
diff --git a/doc/03_research/03_planning/00_paf23/overview_paper1.png b/doc/03_research/03_planning/00_paf23/overview_paper1.png
new file mode 100644
index 00000000..d189a78b
Binary files /dev/null and b/doc/03_research/03_planning/00_paf23/overview_paper1.png differ
diff --git a/doc/06_perception/06_vision_node.md b/doc/06_perception/06_vision_node.md
index 1b72ead4..2b9b7bd3 100644
--- a/doc/06_perception/06_vision_node.md
+++ b/doc/06_perception/06_vision_node.md
@@ -1,25 +1,39 @@
# Vision Node
-The Visison Node serves as a replacement for the previous segmentation-node.
-It provides an adaptive interface that is able to perform object-detection or image-segmentation
+The Vision Node provides an adaptive interface that is able to perform object-detection and/or image-segmentation
on several different models. The model can be specified as a parameter in the perception.launch file.
+The VisionNode is currently using the yolov8x-seg model.
+
## Usage
The following code shows how the Vision-Node is specified in perception.launch
`
-
+
-
+
`
@@ -31,19 +45,65 @@ The Vision-Node will automatically switch between object-detection, imagesegment
For now the Vision-Node only supports pyTorch models. Within the next sprint it should be able to
accept other frameworks aswell. It should also be possible to run object-detection and image-segmentation at the same time.
+## Model overview
+
+| Model | Type | Stable | Comments |
+|---------------------------------------|--------------|--------|---------------------------------------|
+| fasterrcnn_resnet50_fpn_v2 | detection | no | CUDA-Problems |
+| fasterrcnn_mobilenet_v3_large_320_fpn | detection | no | CUDA-Problems |
+| yolov8n | detection | yes | |
+| yolov8s | detection | yes | |
+| yolov8m | detection | yes | |
+| yolov8l | detection | yes | |
+| yolov8x | detection | yes | |
+| yolo_nas_l | detection | no | Missing super_gradients package error |
+| yolo_nas_m | detection | no | Missing super_gradients package error |
+| yolo_nas_s | detection | no | Missing super_gradients package error |
+| rtdetr-l | detection | yes | |
+| rtdetr-x | detection | yes | |
+| sam_l | detection | no | Ultralytics Error |
+| FastSAM-x | detection | no | CUDA Problems |
+| deeplabv3_resnet101 | segmentation | no | CUDA Problems, Segmentation Problems |
+| yolov8x-seg | segmentation | yes | |
+
## How it works
### Initialization
The Vision-Node contains a Dictionary with all it's models. Depending on the model parameter it will initialize the correct model and weights.
-`
-self.model_dict = {
- "fasterrcnn_resnet50_fpn_v2": (fasterrcnn_resnet50_fpn_v2(weights=FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT), FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT, "detection", "pyTorch"),
- "fasterrcnn_mobilenet_v3_large_320_fpn": (fasterrcnn_mobilenet_v3_large_320_fpn(weights=FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT), FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT, "detection", "pyTorch"),
- "deeplabv3_resnet101": (deeplabv3_resnet101(weights=DeepLabV3_ResNet101_Weights.DEFAULT), DeepLabV3_ResNet101_Weights.DEFAULT, "segmentation", "pyTorch")
- }
-`
+`self.model_dict = {
+ "fasterrcnn_resnet50_fpn_v2":
+ (fasterrcnn_resnet50_fpn_v2(
+ weights=FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT),
+ FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT,
+ "detection",
+ "pyTorch"),
+ "fasterrcnn_mobilenet_v3_large_320_fpn":
+ (fasterrcnn_mobilenet_v3_large_320_fpn(
+ weights=FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT),
+ FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT,
+ "detection",
+ "pyTorch"),
+ "deeplabv3_resnet101":
+ (deeplabv3_resnet101(
+ weights=DeepLabV3_ResNet101_Weights.DEFAULT),
+ DeepLabV3_ResNet101_Weights.DEFAULT,
+ "segmentation",
+ "pyTorch"),
+ 'yolov8n': (YOLO, "yolov8n.pt", "detection", "ultralytics"),
+ 'yolov8s': (YOLO, "yolov8s.pt", "detection", "ultralytics"),
+ 'yolov8m': (YOLO, "yolov8m.pt", "detection", "ultralytics"),
+ 'yolov8l': (YOLO, "yolov8l.pt", "detection", "ultralytics"),
+ 'yolov8x': (YOLO, "yolov8x.pt", "detection", "ultralytics"),
+ 'yolo_nas_l': (NAS, "yolo_nas_l.pt", "detection", "ultralytics"),
+ 'yolo_nas_m': (NAS, "yolo_nas_m.pt", "detection", "ultralytics"),
+ 'yolo_nas_s': (NAS, "yolo_nas_s.pt", "detection", "ultralytics"),
+ 'rtdetr-l': (RTDETR, "rtdetr-l.pt", "detection", "ultralytics"),
+ 'rtdetr-x': (RTDETR, "rtdetr-x.pt", "detection", "ultralytics"),
+ 'yolov8x-seg': (YOLO, "yolov8x-seg.pt", "segmentation", "ultralytics"),
+ 'sam_l': (SAM, "sam_l.pt", "detection", "ultralytics"),
+ 'FastSAM-x': (FastSAM, "FastSAM-x.pt", "detection", "ultralytics")}`
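+
+For reference, a condensed sketch of how such an entry is consumed in the constructor (the tuple indices follow the layout above; `model` is the launch-file parameter):
+
+`
+model_info = self.model_dict[self.get_param('model')]
+self.model = model_info[0]      # torchvision model instance or ultralytics model class
+self.weights = model_info[1]    # weights object or .pt file name
+self.type = model_info[2]       # "detection" or "segmentation"
+self.framework = model_info[3]  # "pyTorch" or "ultralytics"
+
+if self.framework == "ultralytics":
+    # ultralytics entries store the class, so instantiate it with the weight file
+    self.model = self.model(self.weights)
+`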
### Core
@@ -61,18 +121,21 @@ This function is automatically triggered by the Camera-Subscriber of the Vision-
## Visualization
-The Vision-Node implements an ImagePublisher under the topic: "/paf//Center/segmented_image"
+The Vision-Node implements an ImagePublisher under the topic: "/paf/hero/Center/segmented_image"
+
+The RViz configuration file has been changed accordingly to display the published images alongside the camera.
-The Configuartion File of RViz has been changed accordingly to display the published images alongside with the Camera.
+The built-in visualization of the YOLO models works very well.
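+
+For reference, the published image can be consumed from any other node. A minimal sketch (rospy and the sensor_msgs/Image type are assumptions, not spelled out in this document):
+
+`
+import rospy
+from sensor_msgs.msg import Image
+
+
+def callback(msg: Image):
+    rospy.loginfo("segmented image received: %dx%d", msg.width, msg.height)
+
+
+rospy.init_node("segmented_image_listener")
+rospy.Subscriber("/paf/hero/Center/segmented_image", Image, callback)
+rospy.spin()
+`
+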
## Known Issues
### Time
-First experiments showed that the handle_camera_image function is way to slow to be used reliably. It takes around 1.5 seconds to handle one image.
+When running the YOLO models, the timing issue is fixed because ultralytics manages the CUDA resources very well.
-Right now the Vision-Node is not using cuda due to cuda-memory-issues that couldn't be fixed right away.
+When running other models, the CUDA error persists.
-The performance is expected to rise quite a bit when using cuda.
+## Segmentation
-Also their is lots more room for testing different models inside the Vision-Node to evualte their accuracy and time-performance.
+For some reason the segmentation-mask creation (create_mask) works in a standalone project, but not in the Vision-Node.
+I stopped debugging because the YOLO models work far better and provide a good, stable baseline.
diff --git a/doc/06_perception/experiments/model_evaluation/README.md b/doc/06_perception/experiments/object-detection-model_evaluation/README.md
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/README.md
rename to doc/06_perception/experiments/object-detection-model_evaluation/README.md
diff --git a/doc/06_perception/experiments/model_evaluation/asset-copies/1619_PT_fasterrcnn_resnet50_fpn_v2.jpg b/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_PT_fasterrcnn_resnet50_fpn_v2.jpg
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/asset-copies/1619_PT_fasterrcnn_resnet50_fpn_v2.jpg
rename to doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_PT_fasterrcnn_resnet50_fpn_v2.jpg
diff --git a/doc/06_perception/experiments/model_evaluation/asset-copies/1619_TF_faster-rcnn.jpg b/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_TF_faster-rcnn.jpg
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/asset-copies/1619_TF_faster-rcnn.jpg
rename to doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_TF_faster-rcnn.jpg
diff --git a/doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolo_nas_l.jpg b/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_nas_l.jpg
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolo_nas_l.jpg
rename to doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_nas_l.jpg
diff --git a/doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolo_rtdetr_x.jpg b/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_rtdetr_x.jpg
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolo_rtdetr_x.jpg
rename to doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_rtdetr_x.jpg
diff --git a/doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolov8x.jpg b/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x.jpg
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolov8x.jpg
rename to doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x.jpg
diff --git a/doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolov8x_seg.jpg b/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x_seg.jpg
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/asset-copies/1619_yolov8x_seg.jpg
rename to doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x_seg.jpg
diff --git a/doc/06_perception/experiments/model_evaluation/globals.py b/doc/06_perception/experiments/object-detection-model_evaluation/globals.py
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/globals.py
rename to doc/06_perception/experiments/object-detection-model_evaluation/globals.py
diff --git a/doc/06_perception/experiments/model_evaluation/pt.py b/doc/06_perception/experiments/object-detection-model_evaluation/pt.py
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/pt.py
rename to doc/06_perception/experiments/object-detection-model_evaluation/pt.py
diff --git a/doc/06_perception/experiments/model_evaluation/pylot.py b/doc/06_perception/experiments/object-detection-model_evaluation/pylot.py
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/pylot.py
rename to doc/06_perception/experiments/object-detection-model_evaluation/pylot.py
diff --git a/doc/06_perception/experiments/model_evaluation/requirements.txt b/doc/06_perception/experiments/object-detection-model_evaluation/requirements.txt
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/requirements.txt
rename to doc/06_perception/experiments/object-detection-model_evaluation/requirements.txt
diff --git a/doc/06_perception/experiments/model_evaluation/yolo.py b/doc/06_perception/experiments/object-detection-model_evaluation/yolo.py
similarity index 100%
rename from doc/06_perception/experiments/model_evaluation/yolo.py
rename to doc/06_perception/experiments/object-detection-model_evaluation/yolo.py
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/README.md b/doc/06_perception/experiments/traffic-light-detection_evaluation/README.md
new file mode 100644
index 00000000..46e8e0af
--- /dev/null
+++ b/doc/06_perception/experiments/traffic-light-detection_evaluation/README.md
@@ -0,0 +1,43 @@
+# Evaluation of the PAF22 Traffic Light Detection
+
+In this experiment, the existing Traffic Light Detection from PAF22 has been tested.
+The goal was to verify that it is suitable for PAF23.
+
+## Model
+
+The model is a Convolutional Neural Network (CNN) consisting of the following layers (a PyTorch sketch follows the list):
+
+1. **Convolutional Layer 1**: This layer uses a 2D convolution over an input signal composed of several input planes, with in_channels input channels, 4 output channels, a kernel size of 5, and padding set to 'same'. This means the output size is the same as the input size.
+2. **Batch Normalization Layer**: This layer applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.
+3. **Convolutional Layer 2**: This layer is similar to the first convolutional layer but it takes the output of the first layer (4 channels) as input.
+4. **Max Pooling Layer 1**: This layer uses a 2D max pooling over an input signal composed of several input planes, with a kernel size of (2, 2).
+5. **Convolutional Layer 3**: This layer is similar to the previous convolutional layers but it has a kernel size of 3.
+6. **Max Pooling Layer 2**: This layer is similar to the first max pooling layer.
+7. **Convolutional Layer 4**: This layer is similar to the previous convolutional layers.
+8. **Max Pooling Layer 3**: This layer is similar to the previous max pooling layers.
+9. **Flatten Layer**: This layer flattens the input by removing the spatial dimensions.
+10. **Dropout Layer**: This layer randomly zeroes some of the elements of the input tensor with probability p=0.3 using samples from a Bernoulli distribution.
+11. **Linear Layer**: This layer applies a linear transformation to the incoming data. It has 64 input features and num_classes output features.
+
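+As a rough illustration, a minimal PyTorch sketch of the layer stack described above (the number of input channels, the input resolution, and the number of classes are assumptions; only the layer types, kernel sizes, the dropout probability, and the 64 linear input features are taken from the description):
+
+```python
+import torch.nn as nn
+
+
+class TrafficLightCNN(nn.Module):
+    def __init__(self, num_classes=4):  # red, yellow, green, backside (assumed)
+        super().__init__()
+        self.features = nn.Sequential(
+            nn.Conv2d(3, 4, kernel_size=5, padding='same'),  # Convolutional Layer 1
+            nn.BatchNorm2d(4),                               # Batch Normalization Layer
+            nn.Conv2d(4, 4, kernel_size=5, padding='same'),  # Convolutional Layer 2
+            nn.MaxPool2d((2, 2)),                            # Max Pooling Layer 1
+            nn.Conv2d(4, 4, kernel_size=3, padding='same'),  # Convolutional Layer 3
+            nn.MaxPool2d((2, 2)),                            # Max Pooling Layer 2
+            nn.Conv2d(4, 4, kernel_size=3, padding='same'),  # Convolutional Layer 4
+            nn.MaxPool2d((2, 2)),                            # Max Pooling Layer 3
+        )
+        self.classifier = nn.Sequential(
+            nn.Flatten(),                # flatten the spatial dimensions
+            nn.Dropout(p=0.3),           # dropout with p = 0.3
+            nn.Linear(64, num_classes),  # 64 features, e.g. a 32x32 input -> 4 channels * 4 * 4
+        )
+
+    def forward(self, x):
+        return self.classifier(self.features(x))
+```
+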
+## Dataset
+
+The existing dataset of PAF22 consists of 2340 images (combined) of the categories red, yellow, green, backside. There are also 382 validation images (combined).
+
+The data can be accessed through DVC.
+
+## Training
+
+Running the training with `dvc exp run` in the traffic light detection directory results in a trained model with >99% training and validation accuracy.
+
+## Examples
+
+| Result | Large | Small |
+|--------|-------|-------|
+| Green  | ![Green-Large](assets/green_4.png) | ![Green-Small](assets/green_22.jpg) |
+| Yellow | ![Yellow-Large](assets/yellow_1.png) | ![Yellow-Small](assets/yellow_18.jpg) |
+| Red    | ![Red-Large](assets/red_10.png) | ![Red-Small](assets/red_20.png) |
+| Back   | ![Back-Large](assets/back_1.png) | ![Back-Small](assets/back_14.jpg) |
+
+## Verdict
+
+The high accuracy and the manual testing of the example images above verified that the existing PAF22 traffic light detection model can be used for PAF23.
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_1.png b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_1.png
new file mode 100644
index 00000000..b0a6a2f6
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_1.png differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_14.jpg b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_14.jpg
new file mode 100644
index 00000000..8fa865b1
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_14.jpg differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_22.jpg b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_22.jpg
new file mode 100644
index 00000000..5df96d68
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_22.jpg differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_4.png b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_4.png
new file mode 100644
index 00000000..a65fccf8
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_4.png differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_10.png b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_10.png
new file mode 100644
index 00000000..c7192ae2
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_10.png differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_20.png b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_20.png
new file mode 100644
index 00000000..365e7529
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_20.png differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_1.png b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_1.png
new file mode 100644
index 00000000..b39e5182
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_1.png differ
diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_18.jpg b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_18.jpg
new file mode 100644
index 00000000..12881d31
Binary files /dev/null and b/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_18.jpg differ