From 6c9b3da4bcb8475170e0f2a0822ba638f92660d0 Mon Sep 17 00:00:00 2001 From: JulianTrommer Date: Tue, 8 Oct 2024 15:11:01 +0200 Subject: [PATCH 1/2] Removed PyCharm & changed docstring settings --- .markdownlint.yaml | 2 +- build/docker/agent/Dockerfile | 2 +- build/docker/agent/Dockerfile_Submission | 2 +- doc/02_development/04_coding_style.md | 4 - doc/02_development/06_pycharm_setup.md | 81 ------------------- doc/02_development/08_project_management.md | 6 -- .../templates/template_class.py | 24 ++---- .../templates/template_wiki_page.md | 36 +++------ 8 files changed, 21 insertions(+), 136 deletions(-) delete mode 100644 doc/02_development/06_pycharm_setup.md diff --git a/.markdownlint.yaml b/.markdownlint.yaml index b49b73db..fe64f4e8 100755 --- a/.markdownlint.yaml +++ b/.markdownlint.yaml @@ -6,7 +6,7 @@ MD013: tables: false MD004: - style: "consistent" + style: dash MD051: false diff --git a/build/docker/agent/Dockerfile b/build/docker/agent/Dockerfile index 107ece04..1917b372 100644 --- a/build/docker/agent/Dockerfile +++ b/build/docker/agent/Dockerfile @@ -139,7 +139,7 @@ ENV CARLA_SIM_HOST=localhost ENV CARLA_SIM_WAIT_SECS=15 ENV SCENARIO_RUNNER_PATH=/opt/scenario_runner -# setup python path for PyCharm integration +# setup python path RUN echo /catkin_ws/install/lib/python3/dist-packages >> /home/$USERNAME/.local/lib/python3.8/site-packages/carla.pth && \ echo /catkin_ws/devel/lib/python3/dist-packages >> /home/$USERNAME/.local/lib/python3.8/site-packages/carla.pth && \ echo /opt/ros/noetic/lib/python3/dist-packages >> /home/$USERNAME/.local/lib/python3.8/site-packages/carla.pth && \ diff --git a/build/docker/agent/Dockerfile_Submission b/build/docker/agent/Dockerfile_Submission index a329247e..128a8bd8 100644 --- a/build/docker/agent/Dockerfile_Submission +++ b/build/docker/agent/Dockerfile_Submission @@ -141,7 +141,7 @@ ENV CARLA_SIM_HOST=localhost ENV CARLA_SIM_WAIT_SECS=15 ENV SCENARIO_RUNNER_PATH=/opt/scenario_runner -# setup python path for 
PyCharm integration +# setup python path RUN echo /catkin_ws/install/lib/python3/dist-packages >> /home/$USERNAME/.local/lib/python3.8/site-packages/carla.pth && \ echo /catkin_ws/devel/lib/python3/dist-packages >> /home/$USERNAME/.local/lib/python3.8/site-packages/carla.pth && \ echo /opt/ros/noetic/lib/python3/dist-packages >> /home/$USERNAME/.local/lib/python3.8/site-packages/carla.pth && \ diff --git a/doc/02_development/04_coding_style.md b/doc/02_development/04_coding_style.md index 0455af0c..390d6859 100644 --- a/doc/02_development/04_coding_style.md +++ b/doc/02_development/04_coding_style.md @@ -20,10 +20,6 @@ VSCode Extensions: - autoDostring - Python Docstring Generator by Nils Werner -To get the ReST format like in Pycharm: - -- Go to Extension setting and change it under `Auto Doctring:Docstring Format` to `sphinx-notypes` - --- - [Coding style guidelines](#coding-style-guidelines) diff --git a/doc/02_development/06_pycharm_setup.md b/doc/02_development/06_pycharm_setup.md deleted file mode 100644 index 7dc4e8d1..00000000 --- a/doc/02_development/06_pycharm_setup.md +++ /dev/null @@ -1,81 +0,0 @@ -# PyCharm Professional - -(Kept from previous group [paf22]) - -For a seamless development experience, we recommend the use of [PyCharm Professional](https://www.jetbrains.com/pycharm/). - -## Getting an education license - -To use PyCharm Professional, you need a license. -Fortunately, all students of Uni-Augsburg can get a free education license using their @uni-a.de mail-address. - -For this, follow the process on the Jetbrains website: [Request Education License](https://www.jetbrains.com/shop/eform/students). - -After completing this process, you can continue to install PyCharm Professional - -## Installing PyCharm professional - -### Jetbrains Toolbox - -The easiest way to install PyCharm Professional and keep it up to date, is to use [Jetbrains Toolbox](https://www.jetbrains.com/toolbox-app/). 
- -For easy installation, there is a [convenience script](https://github.com/nagygergo/jetbrains-toolbox-install), -that downloads JetBrains toolbox and installs it to the right folder. - -```shell -sudo curl -fsSL https://raw.githubusercontent.com/nagygergo/jetbrains-toolbox-install/master/jetbrains-toolbox.sh | bash -``` - -After this you can open the toolbox with the following command: - -```shell -jetbrains-toolbox -``` - -The interface should open, and you can easily install _PyCharm Professional_. - -### Setting up docker-compose standalone binary - -To use the docker-compose integration of PyCharm Professional, -you additionally need to install the standalone version of [docker-compose](https://docs.docker.com/compose/install/other/) - -```shell -# Download binary -sudo curl -SL https://github.com/docker/compose/releases/download/v2.12.2/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose - -# Make binary executable -sudo chmod +x /usr/local/bin/docker-compose - -# Create symbolic link to make the binary discoverable -sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose - -``` - -### Setting up the paf22 project with docker-compose interpreter - -After opening and activating PyCharm Professional with your education license, open the existing paf22 project-folder in PyCharm Professional. - -The last step is to set up the docker-compose integration. -For this, please follow this [official guide](https://www.jetbrains.com/help/pycharm/using-docker-compose-as-a-remote-interpreter.html#docker-compose-remote), while selecting `./build/docker-compose.yml` as configuration file and `agent` as service. - -After the initial indexing, PyCharm will provide intelligent code feedback and refactoring options in Python. - -## 🚨 Common Problems - -* Error when committing via PyCharm (error message may vary): - - ```shell - ... 
- .git/hooks/pre-commit: 9: ././build/hooks/pre-commit.d/20-markdown.sh: b5: not found - ``` - - This may happen if you installed b5 in your conda environment instead of the native one. - To fix this, install b5 in your native environment: - - ```shell - conda deactivate - sudo apt-get install python pip - pip install b5 - ``` - - After that, the commit should work! diff --git a/doc/02_development/08_project_management.md b/doc/02_development/08_project_management.md index c82c2035..97821432 100644 --- a/doc/02_development/08_project_management.md +++ b/doc/02_development/08_project_management.md @@ -86,12 +86,6 @@ CARLA simulator crashes on startup on your machine. To create a pull request, go to the [branches overview](https://github.com/ll7/paf22/branches) and select ``New Pull Request`` for the branch you want to create a PR for. ![img.png](../00_assets/branch_overview.png) -Alternatively you can create a PR directly from PyCharm using the ``Pull Request`` tab on the sidebar. - -![img.png](../00_assets/Pycharm_PR.png) - -For completing the pull request, fill out the template that opens up automatically. - Merge the pull request after the review process is complete and all the feedback from the reviewer has been worked in. For more information about the review process, see [Review process](./07_review_guideline.md). diff --git a/doc/02_development/templates/template_class.py b/doc/02_development/templates/template_class.py index 9db00607..164c3585 100644 --- a/doc/02_development/templates/template_class.py +++ b/doc/02_development/templates/template_class.py @@ -71,29 +71,15 @@ def test_function3(self): # inline comment # 6. Docstrings # ############################# def test_function4(self, param1, param2): - # This docstring style is supported by Sphinx and helps with automated API documentation creation, automatically created by PyCharm - """ - This is the description of the function. 
+ # This docstring style is the default google style of the autoDocstring extension and helps with automated API documentation creation + """This is the description of the function. - :param param1: first parameter - :param param2: second parameter - :return: return value(s) + Args: + param1 (_type_): _description_ + param2 (_type_): _description_ """ pass - def test_function5(self, param1, param2): - # This docstring style is supported by Sphinx and helps with automated API documentation creation, automatically created by VSCode extension autoDocstring - # VSCode Extentsion: autoDocstring- Python Docstring Generator by Nils Werner - # To get the ReST format like in Pycharm - # Go to Extension setting and change it under `Auto Doctring:Docstring Format` to `sphinx-notypes` - """_summary_ - - :param param1: _description_ - :param param2: _description_ - :return: _description_ - """ - return param1 - # main function of the class def main(self): print("Hello World") diff --git a/doc/02_development/templates/template_wiki_page.md b/doc/02_development/templates/template_wiki_page.md index 68deb358..9ef7fd3c 100644 --- a/doc/02_development/templates/template_wiki_page.md +++ b/doc/02_development/templates/template_wiki_page.md @@ -20,34 +20,24 @@ VSCode Extensions: --- -How to generate a TOC in VSCode and Pycharm: +How to generate a TOC in VSCode: VSCode: -1. Install Markdown All in One via Extensions -2. ``Ctrl+Shift+P`` -3. Command "Create Table of Contents" - -Cosmetic change: Markdown All in One uses `-` as unordered list indicator, to change it to `*` like in Pycharm - -Go to Extension setting and change it under `Markdown>Extension>Toc>Unordered List:Marker` - -Pycharm: - -1. ``Alt+Ins`` -2. Select Table of Contents -3. To update Table of Contents follow Step 1. and select Update Table of Contents +1. ``Ctrl+Shift+P`` +2. 
Command "Create Table of Contents" -* [Title of wiki page](#title-of-wiki-page) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [Cheat Sheet](#cheat-sheet) - * [Basics](#basics) - * [Extended](#extended) - * [more Content](#more-content) - * [Sources](#sources) +- [Title of wiki page](#title-of-wiki-page) + - [Author](#author) + - [Date](#date) + - [Prerequisite](#prerequisite) + - [Cheat Sheet](#cheat-sheet) + - [Basics](#basics) + - [Extended](#extended) + - [My Great Heading {#custom-id}](#my-great-heading-custom-id) + - [more Content](#more-content) + - [Sources](#sources) ## Cheat Sheet From 87178de2b7c5f4b1a7f14b49b74a94175c6f8dbd Mon Sep 17 00:00:00 2001 From: JulianTrommer Date: Tue, 8 Oct 2024 15:22:44 +0200 Subject: [PATCH 2/2] Changed docs to adhere to new markdown linting --- .../templates/template_wiki_page.md | 14 +- .../templates/template_wiki_page_empty.md | 18 +- doc/03_research/01_acting/01_basics_acting.md | 100 ++--- .../01_acting/02_implementation_acting.md | 56 +-- .../01_acting/03_paf21_1_acting.md | 30 +- .../01_acting/05_autoware_acting.md | 30 +- doc/03_research/01_acting/Readme.md | 14 +- .../03_first_implementation_plan.md | 67 +-- doc/03_research/02_perception/Readme.md | 16 +- .../03_planning/00_paf22/02_basics.md | 242 +++++------ .../03_planning/00_paf22/03_Implementation.md | 73 ++-- .../00_paf22/04_decision_making.md | 214 +++++----- .../00_paf22/05_Navigation_Data.md | 38 +- .../00_paf22/06_state_machine_design.md | 131 +++--- .../03_planning/00_paf22/07_OpenDrive.md | 232 +++++------ .../07_reevaluation_desicion_making.md | 49 ++- doc/03_research/03_planning/Readme.md | 4 +- .../02_informations_from_leaderboard.md | 118 +++--- .../04_requirements/03_requirements.md | 69 ++-- .../04_requirements/04_use_cases.md | 386 ++++++++++++------ doc/03_research/04_requirements/Readme.md | 6 +- doc/03_research/Readme.md | 8 +- doc/06_perception/02_dataset_structure.md | 20 +- .../03_lidar_distance_utility.md | 
10 +- doc/06_perception/04_efficientps.md | 44 +- 25 files changed, 1068 insertions(+), 921 deletions(-) diff --git a/doc/02_development/templates/template_wiki_page.md b/doc/02_development/templates/template_wiki_page.md index 9ef7fd3c..8679286f 100644 --- a/doc/02_development/templates/template_wiki_page.md +++ b/doc/02_development/templates/template_wiki_page.md @@ -16,7 +16,7 @@ Josef Kircher VSCode Extensions: -* Markdown All in One by Yu Zhang (for TOC) +- Markdown All in One by Yu Zhang (for TOC) --- @@ -74,9 +74,9 @@ Ordered List --- Unordered List -* First item -* Second item -* Third item +- First item +- Second item +- Third item --- Code @@ -142,10 +142,10 @@ Strikethrough Task List -* [x] Write the press release -* [ ] Update the website +- [x] Write the press release +- [ ] Update the website -* [ ] Contact the media +- [ ] Contact the media --- diff --git a/doc/02_development/templates/template_wiki_page_empty.md b/doc/02_development/templates/template_wiki_page_empty.md index bd0eb1ff..2992fd64 100644 --- a/doc/02_development/templates/template_wiki_page_empty.md +++ b/doc/02_development/templates/template_wiki_page_empty.md @@ -16,18 +16,20 @@ Josef Kircher VSCode Extensions: -* Markdown All in One by Yu Zhang (for TOC) +- Markdown All in One by Yu Zhang (for TOC) --- + -* [Title of wiki page](#title-of-wiki-page) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [Some Content](#some-content) - * [more Content](#more-content) - * [Sources](#sources) +- [Title of wiki page](#title-of-wiki-page) + - [Author](#author) + - [Date](#date) + - [Prerequisite](#prerequisite) + - [Some Content](#some-content) + - [more Content](#more-content) + - [Sources](#sources) + ## Some Content ## more Content diff --git a/doc/03_research/01_acting/01_basics_acting.md b/doc/03_research/01_acting/01_basics_acting.md index deeab2e1..1b6b41f2 100644 --- a/doc/03_research/01_acting/01_basics_acting.md +++ 
b/doc/03_research/01_acting/01_basics_acting.md @@ -19,38 +19,38 @@ Gabriel Schwald, Julian Graf The job of this domain is to translate a preplanned trajectory into actual steering controls for the vehicle. -* safety: - * never exceeding vehicle limits - * never exceeding speed limits - * never leaf path -* driving comfort? +- safety: + - never exceeding vehicle limits + - never exceeding speed limits + - never leaf path +- driving comfort? ## Solutions from old PAF projects ### [Paf 20/1](https://github.com/ll7/psaf1/tree/master/psaf_ros/psaf_steering) -* [carla_ackermann_control](https://carla.readthedocs.io/projects/ros-bridge/en/latest/carla_ackermann_control/) modified for [twist-msgs](http://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/Twist.html) -* input: [twist-msgs](http://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/Twist.html) (for velocity) -* velocity control: PID -* lateral control: PD (heading error) +- [carla_ackermann_control](https://carla.readthedocs.io/projects/ros-bridge/en/latest/carla_ackermann_control/) modified for [twist-msgs](http://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/Twist.html) +- input: [twist-msgs](http://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/Twist.html) (for velocity) +- velocity control: PID +- lateral control: PD (heading error) ### [Paf 21/1](https://github.com/ll7/paf21-1/wiki/Vehicle-Controller) -* input: waypoints -* curve detection: returns distance to next curve -* calculation of max curve speed as sqrt(friction_coefficient x gravity_accel x radius) -* in Curve: [naive Controller](###Pure_Pursuit) -* on straights: [Stanley Controller](###Stanley) -* interface to rosbridge +- input: waypoints +- curve detection: returns distance to next curve +- calculation of max curve speed as sqrt(friction_coefficient x gravity_accel x radius) +- in Curve: [naive Controller](###Pure_Pursuit) +- on straights: [Stanley Controller](###Stanley) +- interface to rosbridge ### [Paf 
20/2](https://github.com/ll7/psaf2) and [Paf 21/2](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_actor#readme) -* input: odometry(position and velocity with uncertainty), local path -* lateral: [Stanley Controller](###Stanley) -* speed controller: pid -* ACC (Adaptive Cruise Control): (speed, distance) -> PID -* Unstuck-Routine (drive backwards) -* Emergency Modus: fastest possible braking ([Tests](https://github.com/ll7/paf21-2/blob/main/docs/paf_actor/backwards/braking.md) -> handbrake with throttle, 30° steering and reverse) +- input: odometry(position and velocity with uncertainty), local path +- lateral: [Stanley Controller](###Stanley) +- speed controller: pid +- ACC (Adaptive Cruise Control): (speed, distance) -> PID +- Unstuck-Routine (drive backwards) +- Emergency Modus: fastest possible braking ([Tests](https://github.com/ll7/paf21-2/blob/main/docs/paf_actor/backwards/braking.md) -> handbrake with throttle, 30° steering and reverse) ## Lateral control @@ -87,11 +87,11 @@ $$ \delta(t) = arctan(2L*\frac{sin(\alpha)}{K_d*v}) $$ -* simple controller -* ignores dynamic forces -* assumes no-slip condition -* possible improvement: vary the look-ahead distance based on vehicle velocity -* not really suited for straights, because ICR moves towards infinity this case +- simple controller +- ignores dynamic forces +- assumes no-slip condition +- possible improvement: vary the look-ahead distance based on vehicle velocity +- not really suited for straights, because ICR moves towards infinity this case ### Stanley @@ -118,7 +118,7 @@ The basic idea of MPC is to model the future behavior of the vehicle and compute ![MPC Controller](../../00_assets/research_assets/mpc.png) *source: [[5]](https://dingyan89.medium.com/three-methods-of-vehicle-lateral-control-pure-pursuit-stanley-and-mpc-db8cc1d32081)* -* cost function can be designed to account for driving comfort +- cost function can be designed to account for driving comfort ### 
[SMC](https://en.wikipedia.org/wiki/Sliding_mode_control) (sliding mode control) @@ -128,10 +128,10 @@ Real implementations of sliding mode control approximate theoretical behavior wi ![chattering](../../00_assets/research_assets/chattering.gif) *source: [[9]](https://ieeexplore.ieee.org/document/1644542)* -* simple -* robust -* stabile -* disadvantage: chattering -> controller is ill-suited for this application +- simple +- robust +- stabile +- disadvantage: chattering -> controller is ill-suited for this application Sources: @@ -155,20 +155,20 @@ PID: already implemented in [ROS](http://wiki.ros.org/pid) (and [CARLA](https:// Further information: -* +- ## Interface **subscribes** to: -* current position +- current position ([nav_msgs/Odometry Message](http://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Odometry.html)) from Perception? -* path ([nav_msgs/Path Message](https://docs.ros.org/en/api/nav_msgs/html/msg/Path.html)) or target point ([geometry_msgs/Pose.msg](https://docs.ros.org/en/api/geometry_msgs/html/msg/Pose.html)) -* (maximal) velocity to drive -* (distance and speed of vehicle to follow) -* (commands for special routines) -* (Distance to obstacles for turning/min turning radius) -* (Road conditions) +- path ([nav_msgs/Path Message](https://docs.ros.org/en/api/nav_msgs/html/msg/Path.html)) or target point ([geometry_msgs/Pose.msg](https://docs.ros.org/en/api/geometry_msgs/html/msg/Pose.html)) +- (maximal) velocity to drive +- (distance and speed of vehicle to follow) +- (commands for special routines) +- (Distance to obstacles for turning/min turning radius) +- (Road conditions) **publishes**: [CarlaEgoVehicleControl.msg](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_msgs/#carlaegovehiclecontrolmsg) or [ackermann_msgs/AckermannDrive.msg](https://docs.ros.org/en/api/ackermann_msgs/html/msg/AckermannDrive.html) @@ -177,10 +177,10 @@ Further information: In the 
[CarlaEgoVehicleInfo.msg](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_msgs/#carlaegovehicleinfomsg) we get a [CarlaEgoVehicleInfoWheel.msg](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_msgs/#carlaegovehicleinfowheelmsg) which provides us with -* tire_friction (a scalar value that indicates the friction of the wheel) -* max_steer_angle (the maximum angle in degrees that the wheel can steer) -* max_brake_torque (the maximum brake torque in Nm) -* max_handbrake_torque (the maximum handbrake torque in Nm) +- tire_friction (a scalar value that indicates the friction of the wheel) +- max_steer_angle (the maximum angle in degrees that the wheel can steer) +- max_brake_torque (the maximum brake torque in Nm) +- max_handbrake_torque (the maximum handbrake torque in Nm) The max curve speed can be calculated as sqrt(**friction_coefficient** *gravity_accel* curve_radius). @@ -193,12 +193,12 @@ For debugging purposes the vehicles path can be visualized using [carlaviz](http ## Additional functionality (open for discussion) -* ACC (Adaptive Cruise Control): reduces speed to keep set distance to vehicle in front (see also [cruise control technology review](https://www.sciencedirect.com/science/article/pii/S004579069700013X), +- ACC (Adaptive Cruise Control): reduces speed to keep set distance to vehicle in front (see also [cruise control technology review](https://www.sciencedirect.com/science/article/pii/S004579069700013X), [a comprehensive review of the development of adaptive cruise control systems](https://www.researchgate.net/publication/245309633_A_comprehensive_review_of_the_development_of_adaptive_cruise_control_systems), [towards an understanding of adaptive cruise control](https://www.sciencedirect.com/science/article/pii/S0968090X0000022X), [Encyclopedia of Systems and Control](https://dokumen.pub/encyclopedia-of-systems-and-control-2nd-ed-2021-3030441830-9783030441838.html)) -* emergency braking: stops the car as fast as possible 
-* emergency braking assistant: uses Lidar as proximity sensor and breaks if it would come to a collision without breaking -* parallel parking: executes [fixed parking sequence](https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5705869) to parallel park vehicle in given parking space -* U-Turn: performs u-turn -* Driving backwards: might a need different controller configuration -* Unstuck routine: performs fixed routine (e.g. driving backwards) if the car hasn't moved in a while +- emergency braking: stops the car as fast as possible +- emergency braking assistant: uses Lidar as proximity sensor and breaks if it would come to a collision without breaking +- parallel parking: executes [fixed parking sequence](https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5705869) to parallel park vehicle in given parking space +- U-Turn: performs u-turn +- Driving backwards: might a need different controller configuration +- Unstuck routine: performs fixed routine (e.g. driving backwards) if the car hasn't moved in a while diff --git a/doc/03_research/01_acting/02_implementation_acting.md b/doc/03_research/01_acting/02_implementation_acting.md index 0d7a216e..dd7b45d5 100644 --- a/doc/03_research/01_acting/02_implementation_acting.md +++ b/doc/03_research/01_acting/02_implementation_acting.md @@ -15,14 +15,14 @@ Gabriel Schwald --- -* [Requirements and challenges for an acting implementation](#requirements-and-challenges-for-an-acting-implementation) - * [Authors](#authors) - * [Date](#date) - * [Planned basic implementation of the Acting domain](#planned-basic-implementation-of-the-acting-domain) - * [List of basic functions](#list-of-basic-functions) - * [List of Inputs/Outputs](#list-of-inputsoutputs) - * [Challenges](#challenges) - * [Next steps](#next-steps) +- [Requirements and challenges for an acting implementation](#requirements-and-challenges-for-an-acting-implementation) + - [Authors](#authors) + - [Date](#date) + - [Planned basic implementation of the 
Acting domain](#planned-basic-implementation-of-the-acting-domain) + - [List of basic functions](#list-of-basic-functions) + - [List of Inputs/Outputs](#list-of-inputsoutputs) + - [Challenges](#challenges) + - [Next steps](#next-steps) This document sums up all functions already agreed upon in [#24](https://github.com/ll7/paf22/issues/24) regarding [acting](../01_acting/01_acting.md), that could be implemented in the next sprint. @@ -36,34 +36,34 @@ These goals lead to the following requirements: ## List of basic functions -* Longitudinal control - * PID controller -* Lateral control - * Pure Pursuit controller - * Stanley controller +- Longitudinal control + - PID controller +- Lateral control + - Pure Pursuit controller + - Stanley controller ## List of Inputs/Outputs -* Subscribes to: - * [nav_msgs/Odometry Message](http://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Odometry.html) : to get the current position and heading - * [nav_msgs/Path Message](https://docs.ros.org/en/api/nav_msgs/html/msg/Path.html) : to get the current trajectory - * emergency breaking msg : to initiate emergency breaking - * speed limit msg : to get the maximum velocity -* Publishes: - * [CarlaEgoVehicleControl.msg](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_msgs/#carlaegovehiclecontrolmsg) : to actually control the vehicles throttle, steering, ... +- Subscribes to: + - [nav_msgs/Odometry Message](http://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Odometry.html) : to get the current position and heading + - [nav_msgs/Path Message](https://docs.ros.org/en/api/nav_msgs/html/msg/Path.html) : to get the current trajectory + - emergency breaking msg : to initiate emergency breaking + - speed limit msg : to get the maximum velocity +- Publishes: + - [CarlaEgoVehicleControl.msg](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_msgs/#carlaegovehiclecontrolmsg) : to actually control the vehicles throttle, steering, ... 
## Challenges A short list of challenges for the implementation of a basic acting domain and how they these could be tackled based on the requirements mentioned above. -* The vehicle needs to know its own position => [nav_msgs/Odometry Message](http://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Odometry.html) or [GNSS](https://carla.readthedocs.io/en/latest/ref_sensors/#gnss-sensor) sensor -* The vehicle needs to know its own velocity => can be calculated from last/current position and time or the [speedometer](https://leaderboard.carla.org/#map-track) pseudosensor can be used -* The vehicle needs to know its planned trajectory => [nav_msgs/Path Message](https://docs.ros.org/en/api/nav_msgs/html/msg/Path.html) this trajectory may need to be updated to accommodate obstacles -* Longitudinal control => a simple PID controller should suffice -* lateral control => Pure Pursuit as well as Stanley controller should be implemented, following tests can show, where to use each controller. -* additional features: - * emergency breaking => this command is supposed to bypass longitudinal and lateral controllers (and should use the bug discoverd by [paf21-2](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_actor#bugabuses)) - * additional functionality mostly should be added here ... 
+- The vehicle needs to know its own position => [nav_msgs/Odometry Message](http://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Odometry.html) or [GNSS](https://carla.readthedocs.io/en/latest/ref_sensors/#gnss-sensor) sensor +- The vehicle needs to know its own velocity => can be calculated from last/current position and time or the [speedometer](https://leaderboard.carla.org/#map-track) pseudosensor can be used +- The vehicle needs to know its planned trajectory => [nav_msgs/Path Message](https://docs.ros.org/en/api/nav_msgs/html/msg/Path.html) this trajectory may need to be updated to accommodate obstacles +- Longitudinal control => a simple PID controller should suffice +- lateral control => Pure Pursuit as well as Stanley controller should be implemented, following tests can show, where to use each controller. +- additional features: + - emergency breaking => this command is supposed to bypass longitudinal and lateral controllers (and should use the bug discoverd by [paf21-2](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_actor#bugabuses)) + - additional functionality mostly should be added here ... 
## Next steps diff --git a/doc/03_research/01_acting/03_paf21_1_acting.md b/doc/03_research/01_acting/03_paf21_1_acting.md index 602205d9..c76dad25 100644 --- a/doc/03_research/01_acting/03_paf21_1_acting.md +++ b/doc/03_research/01_acting/03_paf21_1_acting.md @@ -2,34 +2,34 @@ ## Inputs -* waypoints of the planned route -* general odometry of the vehicle +- waypoints of the planned route +- general odometry of the vehicle ## Curve Detection -* Can detect curves on the planned trajectory -* Calculates the speed in which to drive the detected Curve +- Can detect curves on the planned trajectory +- Calculates the speed in which to drive the detected Curve ![Curve](../../00_assets/research_assets/curve_detection_paf21_1.png) ## Speed Control -* [CARLA Ackermann Control](https://carla.readthedocs.io/projects/ros-bridge/en/latest/carla_ackermann_control/) -* Speed is forwarded to the CARLA vehicle via Ackermann_message, which already includes a PID controller for safe driving/accelerating etc. -* no further controlling needed -> speed can be passed as calculated +- [CARLA Ackermann Control](https://carla.readthedocs.io/projects/ros-bridge/en/latest/carla_ackermann_control/) +- Speed is forwarded to the CARLA vehicle via Ackermann_message, which already includes a PID controller for safe driving/accelerating etc. 
+- no further controlling needed -> speed can be passed as calculated ## Steering Control ### Straight Trajectories -* **Stanley Steering Controller** - * Calculates steering angle from offset and heading error - * includes PID controller +- **Stanley Steering Controller** + - Calculates steering angle from offset and heading error + - includes PID controller ![Stanley Controller](../../00_assets/research_assets/stanley_paf21_1.png) ### Detected Curves -* **Naive Steering Controller** (close to pure pursuit) - * uses Vehicle Position + Orientation + Waypoints - * Calculate direction to drive to as vector - * direction - orientation = Steering angle at each point in time - * speed is calculated in Curve Detection and taken as is +- **Naive Steering Controller** (close to pure pursuit) + - uses Vehicle Position + Orientation + Waypoints + - Calculate direction to drive to as vector + - direction - orientation = Steering angle at each point in time + - speed is calculated in Curve Detection and taken as is diff --git a/doc/03_research/01_acting/05_autoware_acting.md b/doc/03_research/01_acting/05_autoware_acting.md index 8ba6b880..bb84218f 100644 --- a/doc/03_research/01_acting/05_autoware_acting.md +++ b/doc/03_research/01_acting/05_autoware_acting.md @@ -2,11 +2,11 @@ ## Inputs -* Odometry (position and orientation, from Localization module) -* Trajectory (output of Planning) -* Steering Status (current steering of vehicle, from Vehicle Interface) -* Actuation Status (acceleration, steering, brake actuations, from Vehicle Interface) -* (“vehicle signal commands” directly into Vehicle Interface -> Handbrake, Hazard Lights, Headlights, Horn, Stationary Locking, Turn Indicators, Wipers etc.) 
+- Odometry (position and orientation, from Localization module) +- Trajectory (output of Planning) +- Steering Status (current steering of vehicle, from Vehicle Interface) +- Actuation Status (acceleration, steering, brake actuations, from Vehicle Interface) +- (“vehicle signal commands” directly into Vehicle Interface -> Handbrake, Hazard Lights, Headlights, Horn, Stationary Locking, Turn Indicators, Wipers etc.) ### General Component Architecture @@ -18,19 +18,19 @@ ## [Trajectory Follower](https://autowarefoundation.github.io/autoware.universe/main/control/trajectory_follower_base/) -* generates control command to follow reference trajectory from Planning -* computes lateral (steering) and longitudinal (velocity) controls separately -* lateral controller: mpc (model predictive) or pure pursuit -* longitudinal: “currently only” PID controller +- generates control command to follow reference trajectory from Planning +- computes lateral (steering) and longitudinal (velocity) controls separately +- lateral controller: mpc (model predictive) or pure pursuit +- longitudinal: “currently only” PID controller ## Vehicle Command Gate -* filters control commands to prevent abnormal values -* sends commands to [Vehicle Interface](https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-interfaces/components/vehicle-interface/) +- filters control commands to prevent abnormal values +- sends commands to [Vehicle Interface](https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-interfaces/components/vehicle-interface/) ## Outputs -* steering angle -* steering torque -* speed -* acceleration +- steering angle +- steering torque +- speed +- acceleration diff --git a/doc/03_research/01_acting/Readme.md b/doc/03_research/01_acting/Readme.md index b1e75e53..5bc58da5 100644 --- a/doc/03_research/01_acting/Readme.md +++ b/doc/03_research/01_acting/Readme.md @@ -2,10 +2,10 @@ This folder contains all the results of our research on 
acting: -* **PAF22** -* [Basics](./01_basics_acting.md) -* [Implementation](./02_implementation_acting.md) -* **PAF23** -* [PAF21_1 Acting](./03_paf21_1_acting.md) -* [PAF21_2 Acting & Pylot Control](./04_paf21_2_and_pylot_acting.md) -* [Autoware Control](./05_autoware_acting.md) +- **PAF22** +- [Basics](./01_basics_acting.md) +- [Implementation](./02_implementation_acting.md) +- **PAF23** +- [PAF21_1 Acting](./03_paf21_1_acting.md) +- [PAF21_2 Acting & Pylot Control](./04_paf21_2_and_pylot_acting.md) +- [Autoware Control](./05_autoware_acting.md) diff --git a/doc/03_research/02_perception/03_first_implementation_plan.md b/doc/03_research/02_perception/03_first_implementation_plan.md index 77498f1c..65ddbbc2 100644 --- a/doc/03_research/02_perception/03_first_implementation_plan.md +++ b/doc/03_research/02_perception/03_first_implementation_plan.md @@ -15,23 +15,24 @@ Marco Riedenauer --- -* [First Implementation Plan](#first-implementation-plan) - * [Authors](#authors) - * [Date](#date) - * [Overview](#overview) - * [Panoptic Segmentation](#panoptic-segmentation) - * [Things and Stuff](#things-and-stuff) - * [Things](#things) - * [Stuff](#stuff) - * [Segmentation Overview](#segmentation-overview) - * [Image Panoptic Segmentation](#image-panoptic-segmentation) - * [LIDAR Panoptic Segmentation](#lidar-panoptic-segmentation) - * [Position Validation](#position-validation) - * [Obstacle Detection and Object Classification](#obstacle-detection-and-object-classification) - * [Lane Detection](#lane-detection) - * [Traffic Light Detection](#traffic-light-detection) - * [Traffic Sign Detection](#traffic-sign-detection) - * [Prediction](#prediction) +- [First Implementation Plan](#first-implementation-plan) + - [Authors](#authors) + - [Date](#date) + - [Overview](#overview) + - [Panoptic Segmentation](#panoptic-segmentation) + - [Things and Stuff](#things-and-stuff) + - [Things](#things) + - [Stuff](#stuff) + - [Segmentation Overview](#segmentation-overview) + - [Image 
Panoptic Segmentation](#image-panoptic-segmentation) + - [LIDAR Panoptic Segmentation](#lidar-panoptic-segmentation) + - [Position Validation](#position-validation) + - [Obstacle Detection and Object Classification](#obstacle-detection-and-object-classification) + - [Lane Detection](#lane-detection) + - [Traffic Light Detection](#traffic-light-detection) + - [Traffic Sign Detection](#traffic-sign-detection) + - [Prediction](#prediction) + - [Possible Issues/Milestones](#possible-issuesmilestones) --- @@ -58,11 +59,11 @@ Stuff is the term used to define objects that don’t have proper geometry but a There are three different kinds of image segmentation: -* **Semantic Segmentation**: \ +- **Semantic Segmentation**: \ Classification of every pixel or point in an image or LIDAR map into different classes (car, person, street, ...) -* **Instance Segmentation**: \ +- **Instance Segmentation**: \ Detection of the different instances of things. -* **Panoptic Segmentation**: \ +- **Panoptic Segmentation**: \ Combination of semantic segmentation and instance segmentation. Detection of stuff plus instances of things. ![Segmentation](../../00_assets/segmentation.png) @@ -129,11 +130,11 @@ As classification net I would recommend the [net implemented by PAF21-1](https:/ Possible states are: -* green -* orange -* red -* off -* backside +- green +- orange +- red +- off +- backside --- @@ -159,11 +160,11 @@ No implementation plan yet. ## Possible Issues/Milestones -* Implement/Adapt panoptic segmentation model (EfficientPS) -* (Implement/Adapt) LIDAR panoptic segmentation model (EfficientLPS) -* Choose datasets for training -* Generate own training data for fine-tuning -* Implement classification net for traffic light/sign classification -* Find ways for lane detection -* Find solutions/implementations for the projection of LIDAR, Radar and image data -* Position validation necessary? 
+- Implement/Adapt panoptic segmentation model (EfficientPS) +- (Implement/Adapt) LIDAR panoptic segmentation model (EfficientLPS) +- Choose datasets for training +- Generate own training data for fine-tuning +- Implement classification net for traffic light/sign classification +- Find ways for lane detection +- Find solutions/implementations for the projection of LIDAR, Radar and image data +- Position validation necessary? diff --git a/doc/03_research/02_perception/Readme.md b/doc/03_research/02_perception/Readme.md index 364be7af..170fe63f 100644 --- a/doc/03_research/02_perception/Readme.md +++ b/doc/03_research/02_perception/Readme.md @@ -2,11 +2,11 @@ This folder contains all the results of research on perception: -* **PAF22** - * [Basics](./02_basics.md) - * [First implementation plan](./03_first_implementation_plan.md) -* **PAF23** - * [Pylot Perception](./04_pylot.md) - * [PAF_21_2 Perception](./05_Research_PAF21-Perception.md) - * [PAF_21_1_Perception](./06_paf_21_1_perception.md) -* [Autoware Perception](./05-autoware-perception.md) +- **PAF22** + - [Basics](./02_basics.md) + - [First implementation plan](./03_first_implementation_plan.md) +- **PAF23** + - [Pylot Perception](./04_pylot.md) + - [PAF_21_2 Perception](./05_Research_PAF21-Perception.md) + - [PAF_21_1_Perception](./06_paf_21_1_perception.md) +- [Autoware Perception](./05-autoware-perception.md) diff --git a/doc/03_research/03_planning/00_paf22/02_basics.md b/doc/03_research/03_planning/00_paf22/02_basics.md index b8b75532..16f63ee7 100644 --- a/doc/03_research/03_planning/00_paf22/02_basics.md +++ b/doc/03_research/03_planning/00_paf22/02_basics.md @@ -10,32 +10,32 @@ Simon Erlbacher, Niklas Vogel --- -* [Grundrecherche im Planing](#grundrecherche-im-planing) - * [Authors](#authors) - * [Datum](#datum) - * [PAF 2021-1](#paf-2021-1) - * [Vehicle Controller](#vehicle-controller) - * [Decision-Making-Component](#decision-making-component) - * [PAF 2021-2](#paf-2021-2) - * [PAF 2020 (1 & 
2)](#paf-2020-1--2) - * [Informationen aus alten Projekten](#informationen-aus-alten-projekten) - * [Planning Unterteilung](#planning-unterteilung) - * [Probleme](#probleme) - * [Lokalisierung](#lokalisierung) - * [Hindernisse erkennen](#hindernisse-erkennen) - * [Sicherheitseigenschaften](#sicherheitseigenschaften) - * [Decision Making (Behaviour Planner)](#decision-making-behaviour-planner) - * [Trajektorie](#trajektorie) - * [Trajektorie Tracking](#trajektorie-tracking) - * [Offene Fragen aus dem Issue](#offene-fragen-aus-dem-issue) - * [Was ist zu tun?](#was-ist-zu-tun) - * [Eingang](#eingang) - * [Ausgang](#ausgang) - * [Wie sehen die Daten vom Leaderboard für das Global Planning aus](#wie-sehen-die-daten-vom-leaderboard-für-das-global-planning-aus) - * [Daten aus dem LB und Global planning, wie kann daraus eine Trajektorie generiert werden](#daten-aus-dem-lb-und-global-planning-wie-kann-daraus-eine-trajektorie-generiert-werden) - * [Wie sieht die Grenze zwischen global und local plan aus?](#wie-sieht-die-grenze-zwischen-global-und-local-plan-aus) - * [Müssen Staus umfahren werden?](#müssen-staus-umfahren-werden) - * [Sollgeschwindigkeitsplanung](#sollgeschwindigkeitsplanung) +- [Grundrecherche im Planing](#grundrecherche-im-planing) + - [Authors](#authors) + - [Datum](#datum) + - [PAF 2021-1](#paf-2021-1) + - [Vehicle Controller](#vehicle-controller) + - [Decision-Making-Component](#decision-making-component) + - [PAF 2021-2](#paf-2021-2) + - [PAF 2020 (1 \& 2)](#paf-2020-1--2) + - [Informationen aus alten Projekten](#informationen-aus-alten-projekten) + - [Planning Unterteilung](#planning-unterteilung) + - [Probleme](#probleme) + - [Lokalisierung](#lokalisierung) + - [Hindernisse erkennen](#hindernisse-erkennen) + - [Sicherheitseigenschaften](#sicherheitseigenschaften) + - [Decision Making (Behaviour Planner)](#decision-making-behaviour-planner) + - [Trajektorie](#trajektorie) + - [Trajektorie Tracking](#trajektorie-tracking) + - [Offene Fragen aus dem 
Issue](#offene-fragen-aus-dem-issue) + - [Was ist zu tun?](#was-ist-zu-tun) + - [Eingang](#eingang) + - [Ausgang](#ausgang) + - [Wie sehen die Daten vom Leaderboard für das Global Planning aus](#wie-sehen-die-daten-vom-leaderboard-für-das-global-planning-aus) + - [Daten aus dem LB und Global planning, wie kann daraus eine Trajektorie generiert werden](#daten-aus-dem-lb-und-global-planning-wie-kann-daraus-eine-trajektorie-generiert-werden) + - [Wie sieht die Grenze zwischen global und local plan aus?](#wie-sieht-die-grenze-zwischen-global-und-local-plan-aus) + - [Müssen Staus umfahren werden?](#müssen-staus-umfahren-werden) + - [Sollgeschwindigkeitsplanung](#sollgeschwindigkeitsplanung) ## [PAF 2021-1](https://github.com/ll7/paf21-1) @@ -57,15 +57,15 @@ Die Kurvendetektion berechnet die maximale Kurvengeschwindigkeit durch Ermittlun Inputs: -* Fahrzeugposition -* Fahrzeugorientierung -* Fahrzeuggeschwindigkeit -* Fahrtrajektorie +- Fahrzeugposition +- Fahrzeugorientierung +- Fahrzeuggeschwindigkeit +- Fahrtrajektorie Outputs: -* Sollgeschwindigkeit -* Lenkwinkel +- Sollgeschwindigkeit +- Lenkwinkel ### Decision-Making-Component @@ -82,16 +82,16 @@ Finite-state machine für Manöver: Inputs: -* Geschwindigkeit -* Objekt auf Trajektorie -* Ampelsignale -* Geschwindigkeitsbegrenzung -* Geschwindigkeit und Position anderer Verkehrsteilnehmer -* Target Lane +- Geschwindigkeit +- Objekt auf Trajektorie +- Ampelsignale +- Geschwindigkeitsbegrenzung +- Geschwindigkeit und Position anderer Verkehrsteilnehmer +- Target Lane Outputs: -* "Actions" (Bremsen, Beschleunigen, Halten, Spurwechsel...) +- "Actions" (Bremsen, Beschleunigen, Halten, Spurwechsel...) Globaler Planer Überblick: ![Alt text](https://github.com/ll7/paf21-1/raw/master/imgs/Global%20Planer.png) @@ -100,21 +100,21 @@ Globaler Planer Überblick: verantwortlich für die Routenplanung und Pfadplanung für das Ego-Vehicle sowie die erkannten Verkehrsteilnehmer. 
-* global_planner - * Planung einer Route von einem Startpunkt zu einem oder einer Liste an Zielpunkten - * Commonroad Route Planner (TUM) -> Liste an Routen-Lanelets sowie eine Liste an Punkten mit Abstand etwa 10cm - * Anreicherung mit parallelen Spuren -* local_planner - * Lokale Pfadplanung inklusive Spurwahl, Ampelmanagement und Spurwechsel - * erlaubte Geschwindigkeit, sowie die bevorzugte Spur basierend auf der Hinderniserkennung (obstacle planner) wird ergänzt - * "beste"/schnellste Möglichkeit wird errechnet und weiter an acting geschickt -* obstacle_planner - * Verwaltung von dynamischen hindernissen - * Vorhersage von Pfaden anderer Fahrzeuge und generieren von Folgefahrzeug-Informationen - * Verwerfen von "irrelevanten" Fahrezeugen - -* Geschwindigkeitsplanung/Kontrolle wie 2021-1 + Bremswegplanung [Details](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_planning#bremsweg) -* Map Manager für die Verwaltung aller statischen Kartendaten +- global_planner + - Planung einer Route von einem Startpunkt zu einem oder einer Liste an Zielpunkten + - Commonroad Route Planner (TUM) -> Liste an Routen-Lanelets sowie eine Liste an Punkten mit Abstand etwa 10cm + - Anreicherung mit parallelen Spuren +- local_planner + - Lokale Pfadplanung inklusive Spurwahl, Ampelmanagement und Spurwechsel + - erlaubte Geschwindigkeit, sowie die bevorzugte Spur basierend auf der Hinderniserkennung (obstacle planner) wird ergänzt + - "beste"/schnellste Möglichkeit wird errechnet und weiter an acting geschickt +- obstacle_planner + - Verwaltung von dynamischen Hindernissen + - Vorhersage von Pfaden anderer Fahrzeuge und generieren von Folgefahrzeug-Informationen + - Verwerfen von "irrelevanten" Fahrzeugen + +- Geschwindigkeitsplanung/Kontrolle wie 2021-1 + Bremswegplanung [Details](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_planning#bremsweg) +- Map Manager für die Verwaltung aller statischen Kartendaten ## PAF 2020 ([1](https://github.com/ll7/psaf1) &
[2](https://github.com/ll7/psaf2)) @@ -129,14 +129,14 @@ Teilbaum "Intersection" als Beispiel: "If there is a Intersection coming up the agent executes the following sequence of behaviours: -* Approach Intersection - * Slows down, gets into the right lane for turning and stops at line -* Wait at Intersection - * Waits for traffic lights or higher priority traffic -* Enter Intersection - * Enters the intersection and stops again, if there is higher priority oncoming traffic -* Leave Intersection - * Leaves the intersection in the right direction" +- Approach Intersection + - Slows down, gets into the right lane for turning and stops at line +- Wait at Intersection + - Waits for traffic lights or higher priority traffic +- Enter Intersection + - Enters the intersection and stops again, if there is higher priority oncoming traffic +- Leave Intersection + - Leaves the intersection in the right direction" [Kompletter Entscheidungsbaum](https://github.com/ll7/psaf2/tree/main/Planning/behavior_agent) @@ -144,8 +144,8 @@ Teilbaum "Intersection" als Beispiel: Quellen: -* -* +- +- ![architektur gewinnterteam19](../../00_assets/gewinnerteam19-architektur.png) @@ -163,8 +163,8 @@ Planning Übersicht ## Probleme -* Kollision mit statischen Objekten (Gehsteig) -* Kollision mit Fußgängern die unerwartetes Verhalten zeigen +- Kollision mit statischen Objekten (Gehsteig) +- Kollision mit Fußgängern die unerwartetes Verhalten zeigen Es wird vorgeschlagen ein festes Notfallmanöver für das Fahrzeug zu erstellen, welches mit einer schnelleren Reaktionszeit greift, um unerwartete Kollisionen zu verhindern. @@ -221,16 +221,16 @@ Einfache Berechnung einer Kollision Wichtig ist die Sicherheitseigenschaft von Autonomen Fahrzeugen. 
Risiken können in drei KLassen unterteilt werden: -* Kollision mit statischen Objekten -* Kollision mit dynamischen Objekten -* Kollision mit unerwarteten Objekten +- Kollision mit statischen Objekten +- Kollision mit dynamischen Objekten +- Kollision mit unerwarteten Objekten In dem Beispielprojekt wurde eine Bewertung der Überlappung von Trajekotrien verschiedener Objekte zur HAnd genommen. Es wird eine mögliche Kollisionszone bestimmt. Das Fahrzeug hat hierbei drei Zonen auf seiner Trajektorie. -* Danger Zone: Hier muss sofort gestoppt werden wenn ein Trajektorien Konflikt detektiert wird -* Warning Zone: Hier entsprechend die Geschwindigkeit anpassen im Verhältnis zu der DTC (distance to collision) -* Safe Zone +- Danger Zone: Hier muss sofort gestoppt werden wenn ein Trajektorien Konflikt detektiert wird +- Warning Zone: Hier entsprechend die Geschwindigkeit anpassen im Verhältnis zu der DTC (distance to collision) +- Safe Zone Die Kollision benötigt die Position eines möglichen Kollisionsgegenstandes und seine Form. Wenn die Orientierung und die Geschwindigkeit verfügbar sind, kann eine Vorhersage zu der zukünftigen Position getroffen werden, um Konflikte zu vermeiden. @@ -243,9 +243,9 @@ Annahme: Alle Verkehrsteilnehmer haben konstante Geschwindigkeit (sonst Berechnu Verkehrsszenario einer Kreuzung mit verschiedenen Zonen. 
-* Roter Bereich: Fahrzeug verlangsamt seine Geschwindigkeit -* Grüner Bereich: Fahrzeug kommt zum stehen -* Oranger Bereich (Intersection): Fahrzeug betritt diesen Bereich nur,wenn kein anderer Verkehrsteilnehmer in diesem erkannt wird +- Roter Bereich: Fahrzeug verlangsamt seine Geschwindigkeit +- Grüner Bereich: Fahrzeug kommt zum stehen +- Oranger Bereich (Intersection): Fahrzeug betritt diesen Bereich nur,wenn kein anderer Verkehrsteilnehmer in diesem erkannt wird ![statemachines](../../00_assets/statemachines.png) Aufteilung in mehrere state machines @@ -253,13 +253,13 @@ Aufteilung in mehrere state machines Eine state machine oder Aufteileung in mehrere state machines Vorteile von mehreren state machines: -* Geringere Berechnungszeit -* einfacher zu erstellen und Instand zu halten +- Geringere Berechnungszeit +- einfacher zu erstellen und Instand zu halten Nachteile von mehreren state machines: -* Sehr viele Regeln -* Regeln zwischen state machines können sich wiederholen +- Sehr viele Regeln +- Regeln zwischen state machines können sich wiederholen Reinforcement Learning, Rule based System, Markov Decision Process @@ -289,8 +289,8 @@ Fehlerminimierung bei der Trajektorienberechnung ## Trajektorie Tracking -* Stanley Controller -* Pure Pursuit Controller +- Stanley Controller +- Pure Pursuit Controller ## Offene Fragen aus dem [Issue](https://github.com/ll7/paf22/issues/26) @@ -302,45 +302,45 @@ Dabei werden andere Fahrzeuge im näheren Umfeld des eigenen Fahrzeugs auch in d ### Eingang -* Fahrzeugposition -* Fahrzeugorientierung -* Fahrzeuggeschwindigkeit -* Fahrtrajektorie bzw anzufahrende Punkte aus denen trajektorie errechnet werden kann -* Objekte auf Trajektorie -* Ampelsignale und Verkehrsschilder -* Geschwindigkeitsbegrenzung -* Geschwindigkeit und Position anderer Verkehrsteilnehmer -* Target Lane +- Fahrzeugposition +- Fahrzeugorientierung +- Fahrzeuggeschwindigkeit +- Fahrtrajektorie bzw anzufahrende Punkte aus denen trajektorie errechnet werden 
kann +- Objekte auf Trajektorie +- Ampelsignale und Verkehrsschilder +- Geschwindigkeitsbegrenzung +- Geschwindigkeit und Position anderer Verkehrsteilnehmer +- Target Lane ### Ausgang -* "Actions" - * Bremsen - * Beschleunigen - * Halten - * Spurwechsel - * ... +- "Actions" + - Bremsen + - Beschleunigen + - Halten + - Spurwechsel + - ... Oder -* Sollgeschwindigkeit -* Lenkwinkel +- Sollgeschwindigkeit +- Lenkwinkel ### Wie sehen die Daten vom Leaderboard für das Global Planning aus "For each route, agents will be initialized at a starting point and directed to drive to a destination point, provided with a description of the route through GPS style coordinates, map coordinates and route instructions." -* GPS coordinates Beispiel: - * {'z': 0.0, 'lat': 48.99822669411668, 'lon': 8.002271601998707} -* Map/World coordinates Beispiel: - * {'x': 153.7, 'y': 15.6, 'z': 0.0} -* Route Instructions: - * RoadOption.CHANGELANELEFT: Move one lane to the left. - * RoadOption.CHANGELANERIGHT: Move one lane to the right. - * RoadOption.LANEFOLLOW: Continue in the current lane. - * RoadOption.LEFT: Turn left at the intersection. - * RoadOption.RIGHT: Turn right at the intersection. - * RoadOption.STRAIGHT: Keep straight at the intersection. +- GPS coordinates Beispiel: + - {'z': 0.0, 'lat': 48.99822669411668, 'lon': 8.002271601998707} +- Map/World coordinates Beispiel: + - {'x': 153.7, 'y': 15.6, 'z': 0.0} +- Route Instructions: + - RoadOption.CHANGELANELEFT: Move one lane to the left. + - RoadOption.CHANGELANERIGHT: Move one lane to the right. + - RoadOption.LANEFOLLOW: Continue in the current lane. + - RoadOption.LEFT: Turn left at the intersection. + - RoadOption.RIGHT: Turn right at the intersection. + - RoadOption.STRAIGHT: Keep straight at the intersection. "The distance between two consecutive waypoints could be up to hundreds of meters. Do not rely on these as your principal mechanism to navigate the environment." 
@@ -352,9 +352,9 @@ Des Weiteren steh als globale Map ein OpenDRIVE file als String geparsed zur Ver [Beispiel 2021-2](#paf-2021-2): -* global_planner (Planung einer Route von einem Startpunkt zu einem oder einer Liste an Zielpunkten) - * Commonroad Route Planner (TUM) -> Liste an Routen-Lanelets sowie eine Liste an Punkten mit Abstand etwa 10cm - * (Anreicherung mit parallelen Spuren) +- global_planner (Planung einer Route von einem Startpunkt zu einem oder einer Liste an Zielpunkten) + - Commonroad Route Planner (TUM) -> Liste an Routen-Lanelets sowie eine Liste an Punkten mit Abstand etwa 10cm + - (Anreicherung mit parallelen Spuren) ### Wie sieht die Grenze zwischen global und local plan aus? @@ -372,12 +372,12 @@ Route deviation — If an agent deviates more than 30 meters from the assigned r ### Sollgeschwindigkeitsplanung -* Schilder - * vor Ampeln, Schildern, Kreisverkehren, Kreuzungen verzögern und langsamer werden -* Kurvenfahrt - * siehe [maximale Kurvengeschwindigkeit](#vehicle-controller) -* Vorausfahrendes Auto - * Geschwindigkeit an dieses Anpassen oder überholen wenn möglich -* Straßenverhältnisse - * "variety of situations: including freeways, urban areas, residential districts and rural settings" - * "variety of weather conditions: including daylight scenes, sunset, rain, fog, and night, among others" +- Schilder + - vor Ampeln, Schildern, Kreisverkehren, Kreuzungen verzögern und langsamer werden +- Kurvenfahrt + - siehe [maximale Kurvengeschwindigkeit](#vehicle-controller) +- Vorausfahrendes Auto + - Geschwindigkeit an dieses Anpassen oder überholen wenn möglich +- Straßenverhältnisse + - "variety of situations: including freeways, urban areas, residential districts and rural settings" + - "variety of weather conditions: including daylight scenes, sunset, rain, fog, and night, among others" diff --git a/doc/03_research/03_planning/00_paf22/03_Implementation.md b/doc/03_research/03_planning/00_paf22/03_Implementation.md index 534f4142..adfaa6dd 
100644 --- a/doc/03_research/03_planning/00_paf22/03_Implementation.md +++ b/doc/03_research/03_planning/00_paf22/03_Implementation.md @@ -16,15 +16,18 @@ Simon Erlbacher, Niklas Vogel --- -* [Planning Implementation](#planning-implementation) - * [Authors](#authors) - * [Date](#date) - * [Overview](#overview) - * [Preplanning](#preplanning) - * [Decision Making](#decision-making) - * [Local Path Planning](#local-path-planning) - * [Next steps](#next-steps) -* [Sources](#sources) +- [Planning Implementation](#planning-implementation) + - [Authors](#authors) + - [Date](#date) + - [Overview](#overview) + - [Preplanning](#preplanning) + - [Decision Making](#decision-making) + - [Local Path Planning](#local-path-planning) + - [Velocity profile](#velocity-profile) + - [Update path](#update-path) + - [Measure distance](#measure-distance) + - [Next steps](#next-steps) + - [Sources](#sources) --- @@ -50,14 +53,14 @@ Lanelet Model Example : Input: -* Map -* Navigation Waypoints -* (Odometry data (sensoring)) -* (GNUU data (sensoring)) +- Map +- Navigation Waypoints +- (Odometry data (sensoring)) +- (GNUU data (sensoring)) Output: -* Route (Sequences of Lanelets and Points) (local path planning, decision making) +- Route (Sequences of Lanelets and Points) (local path planning, decision making) --- @@ -74,13 +77,13 @@ The system needs to make good predictions to avoid collisions. The Perception da Input: -* Lanelet data (preplanning, local path planning) -* perception data (traffic lights situation, pedestrians,...) +- Lanelet data (preplanning, local path planning) +- perception data (traffic lights situation, pedestrians,...) Output: -* updated driving status (acting, local path planning) -* Lanelet data (acting) +- updated driving status (acting, local path planning) +- Lanelet data (acting) --- @@ -96,11 +99,11 @@ This will be calculated directly after the preplanning created a trajectory. 
The Input: -* Trajectory points (preplanning) +- Trajectory points (preplanning) Output: -* Max. Velocity (Acting) +- Max. Velocity (Acting) ### Update path @@ -111,14 +114,14 @@ It also tells the velocity profile to update because of the new trajectory. Input: -* lanelet modell (preplanning) -* update command (decision making) -* information about blocked lanelets (decision making, perception) +- lanelet modell (preplanning) +- update command (decision making) +- information about blocked lanelets (decision making, perception) Output: -* updated trajectory (acting, decision making) -* update command (velocity profile) +- updated trajectory (acting, decision making) +- update command (velocity profile) ### Measure distance @@ -126,24 +129,24 @@ This module measures the distance to obstacles, especially cars, with the Lidar Input: -* Lidar Sensor data (perception, sensoring) +- Lidar Sensor data (perception, sensoring) Output: -* distance value (acting) +- distance value (acting) --- ## Next steps -* Another Coordination with Perception to prevent overlaps with Map Manager, Map enrichment, -* Implement Map Manager to convert data into a compatible type for route planning and to extract additional informations (Speed Limits, trafic signs, traffic lights) -* Implement a commonroad route planner (old projects and Gitlab TUM) -* Analyze Lanelet plan and be familiar with it (Which information can we additionally receive from the plan?) 
-* Enrich Lanelet Modell/Map with additional Informations (additional/parallel Lanes, Speed Limits, trafic signs, traffic lights) -* Choose the Decision Maker (Evaluate Markov Modell in combination with occupancy grid) -* calculate and evaluate distances with given perceptions -* Publish available and needed data (data available in this stage) +- Another Coordination with Perception to prevent overlaps with Map Manager, Map enrichment, +- Implement Map Manager to convert data into a compatible type for route planning and to extract additional informations (Speed Limits, trafic signs, traffic lights) +- Implement a commonroad route planner (old projects and Gitlab TUM) +- Analyze Lanelet plan and be familiar with it (Which information can we additionally receive from the plan?) +- Enrich Lanelet Modell/Map with additional Informations (additional/parallel Lanes, Speed Limits, trafic signs, traffic lights) +- Choose the Decision Maker (Evaluate Markov Modell in combination with occupancy grid) +- calculate and evaluate distances with given perceptions +- Publish available and needed data (data available in this stage) --- diff --git a/doc/03_research/03_planning/00_paf22/04_decision_making.md b/doc/03_research/03_planning/00_paf22/04_decision_making.md index f28a01fe..eea8b21f 100644 --- a/doc/03_research/03_planning/00_paf22/04_decision_making.md +++ b/doc/03_research/03_planning/00_paf22/04_decision_making.md @@ -16,30 +16,44 @@ Josef Kircher --- -* [Decision-making module](#decision-making-module) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [Decision-making algorithms](#decision-making-algorithms) - * [Finite State machine](#finite-state-machine) - * [Markov Chain](#markov-chain) - * [Decision Tree](#decision-tree) - * [Previous approaches](#previous-approaches) - * [PAF21-1](#paf21-1) - * [PAF21-2](#paf21-2) - * [PSAF1 2020](#psaf1-2020) - * [PSAF2 2020](#psaf2-2020) - * [Python or ROS libraries for these decision-making 
algorithms](#python-or-ros-libraries-for-these-decision-making-algorithms) - * [State machines](#state-machines) - * [SMACH](#smach) - * [SMACC](#smacc) - * [Markov Chains](#markov-chains) - * [QuantEcon](#quantecon) - * [markov_decision_making](#markov_decision_making) - * [Decision trees](#decision-trees) - * [pytrees](#pytrees) - * [Conclusion](#conclusion) - * [Sources](#sources) +- [Decision-making module](#decision-making-module) + - [Author](#author) + - [Date](#date) + - [Prerequisite](#prerequisite) + - [Decision-making algorithms](#decision-making-algorithms) + - [Finite State machine](#finite-state-machine) + - [Advantages](#advantages) + - [Disadvantages](#disadvantages) + - [Markov Chain](#markov-chain) + - [Advantages](#advantages-1) + - [Disadvantages](#disadvantages-1) + - [Decision Tree](#decision-tree) + - [Advantages](#advantages-2) + - [Disadvantages](#disadvantages-2) + - [Previous approaches](#previous-approaches) + - [PAF21-1](#paf21-1) + - [State machine](#state-machine) + - [Take away](#take-away) + - [PAF21-2](#paf21-2) + - [No clear concept](#no-clear-concept) + - [Take away](#take-away-1) + - [PSAF1 2020](#psaf1-2020) + - [State machine](#state-machine-1) + - [Take away](#take-away-2) + - [PSAF2 2020](#psaf2-2020) + - [Decision tree](#decision-tree-1) + - [Take Away](#take-away-3) + - [Python or ROS libraries for these decision-making algorithms](#python-or-ros-libraries-for-these-decision-making-algorithms) + - [State machines](#state-machines) + - [SMACH](#smach) + - [SMACC](#smacc) + - [Markov Chains](#markov-chains) + - [QuantEcon](#quantecon) + - [markov\_decision\_making](#markov_decision_making) + - [Decision trees](#decision-trees) + - [pytrees](#pytrees) + - [Conclusion](#conclusion) + - [Sources](#sources) ## Decision-making algorithms @@ -54,14 +68,14 @@ Finite-state machines are of two types—deterministic finite-state machines and #### Advantages -* easy to implement -* we know most of the scenarios (finite state space) -* 
previous groups have solutions we could adapt/extend +- easy to implement +- we know most of the scenarios (finite state space) +- previous groups have solutions we could adapt/extend #### Disadvantages -* many states necessary -* even though we can try to map all possible states, there still might be some situation we could not account for +- many states necessary +- even though we can try to map all possible states, there still might be some situation we could not account for ### Markov Chain @@ -70,14 +84,14 @@ A countably infinite sequence, in which the chain moves state at discrete time s #### Advantages -* possible to build Markov Chain from State machine -* experience from previous projects -* only depends on current state ("memorylessness") +- possible to build Markov Chain from State machine +- experience from previous projects +- only depends on current state ("memorylessness") #### Disadvantages -* might be complicated to implement -* probabilities for transitions might need to be guessed, empirically estimated +- might be complicated to implement +- probabilities for transitions might need to be guessed, empirically estimated ### Decision Tree @@ -86,13 +100,13 @@ It is one way to display an algorithm that only contains conditional control sta #### Advantages -* easy implementation -* tree like structure usable in Machine Learning (Random Forest e.g.) +- easy implementation +- tree like structure usable in Machine Learning (Random Forest e.g.) 
#### Disadvantages -* multiple decision trees necessary -* prediction independent of previous state +- multiple decision trees necessary +- prediction independent of previous state ## Previous approaches @@ -100,57 +114,57 @@ It is one way to display an algorithm that only contains conditional control sta #### State machine -* 2 state machines: one for maneuvers, one for speed control -* Speed control more complex, when to brake seems like the most challenging task +- 2 state machines: one for maneuvers, one for speed control +- Speed control more complex, when to brake seems like the most challenging task #### Take away -* Some states seem to be comparable to what we are required to accomplish by the leaderboard -* Our task might be more complex, needs additional states and transitions -* I'm uncertain about an extra speed state, might be easier to handle that more locally by the local planner, maybe in combination with an observer element that keeps track of the surrounding by processing the information from `Perception` +- Some states seem to be comparable to what we are required to accomplish by the leaderboard +- Our task might be more complex, needs additional states and transitions +- I'm uncertain about an extra speed state, might be easier to handle that more locally by the local planner, maybe in combination with an observer element that keeps track of the surrounding by processing the information from `Perception` ### PAF21-2 #### No clear concept -* some sort of state machine integrated in local planner -* obstacle planner for dynamic obstacles (pedestrians, cars, bicycles) -* useful parameters which we could adapt -* path prediction for obstacles -* obstacles are only interesting if they cross the path of the ego vehicle +- some sort of state machine integrated in local planner +- obstacle planner for dynamic obstacles (pedestrians, cars, bicycles) +- useful parameters which we could adapt +- path prediction for obstacles +- obstacles are only 
interesting if they cross the path of the ego vehicle #### Take away -* Obstacle planner might be useful for dynamic obstacle detection if not handled elsewhere -* path prediction might reduce the number objects tracked that we could interfere with -* Also, if we adapt our local plan this path prediction of other vehicles might come in handy -* On the other hand, overhead to keep track of vehicles and maybe repredict paths if some vehicles change direction +- Obstacle planner might be useful for dynamic obstacle detection if not handled elsewhere +- path prediction might reduce the number objects tracked that we could interfere with +- Also, if we adapt our local plan this path prediction of other vehicles might come in handy +- On the other hand, overhead to keep track of vehicles and maybe repredict paths if some vehicles change direction ### PSAF1 2020 #### State machine -* Three driving functions: Driving, stopping at traffic light, stopping at stop sign -* First project iteration so state machine more simple -* still covers many important scenarios +- Three driving functions: Driving, stopping at traffic light, stopping at stop sign +- First project iteration so state machine more simple +- still covers many important scenarios #### Take away -* Good starting point to have a minimal viable state machine -* Need adaption depending on what information we are getting forwarded/process in the planning module +- Good starting point to have a minimal viable state machine +- Need adaption depending on what information we are getting forwarded/process in the planning module ### PSAF2 2020 #### Decision tree -* This team used a decision tree to cover the major driving scenarios -* Within the scenarios the actions are more linear -* Reminds me of the execution of a state where driving scenarios are the states and the execution the things our local planner should do within that state +- This team used a decision tree to cover the major driving scenarios +- Within the 
scenarios the actions are more linear +- Reminds me of the execution of a state where driving scenarios are the states and the execution the things our local planner should do within that state #### Take Away -* Even though the approach is different, the execution might be similar to the other team algorithms -* We might not be interested in a decision tree as we want to keep the option to switch to a Markov chain, which would be more overhead if we start with a decision tree +- Even though the approach is different, the execution might be similar to the other team algorithms +- We might not be interested in a decision tree as we want to keep the option to switch to a Markov chain, which would be more overhead if we start with a decision tree ## Python or ROS libraries for these decision-making algorithms @@ -158,71 +172,71 @@ It is one way to display an algorithm that only contains conditional control sta #### SMACH -* Task-level architecture for creating state machines for robot behaviour. -* Based on Python -* Fast prototyping: Quickly create state machines -* Complex state machines can easily be created -* Introspection: smach_viewer provides a visual aid to follow the state machine executing its tasks - * smach_viewer is unmaintained and does not work with noetic -* Allows nested state machines -* Values can be passed between states -* Tutorials and documentation seems to be easy to understand so creating a first state machine shouldn't be too hard -* working with several ROS topics and messages within the state machine needs to be evaluated: - * the execution of states is mostly planned to happen in the local planner so for just sending a ROS message, SMACH might be efficient +- Task-level architecture for creating state machines for robot behaviour. 
+- Based on Python +- Fast prototyping: Quickly create state machines +- Complex state machines can easily be created +- Introspection: smach_viewer provides a visual aid to follow the state machine executing its tasks + - smach_viewer is unmaintained and does not work with noetic +- Allows nested state machines +- Values can be passed between states +- Tutorials and documentation seems to be easy to understand so creating a first state machine shouldn't be too hard +- working with several ROS topics and messages within the state machine needs to be evaluated: + - the execution of states is mostly planned to happen in the local planner so for just sending a ROS message, SMACH might be efficient Not use SMACH for: -* Unstructured tasks: SMACH is not efficient in sheduling unstructured tasks -* Low-level systems: SMACH is not build for high efficiency, might fall short for emergency maneuvers +- Unstructured tasks: SMACH is not efficient in scheduling unstructured tasks +- Low-level systems: SMACH is not built for high efficiency, might fall short for emergency maneuvers -* Simple examples run without problem +- Simple examples run without problem #### SMACC -* event-driven, asynchronous, behavioral state machine library -* real-time ROS applications -* written in C++ -* designed to allow programmers to build robot control applications for multicomponent robots, in an intuitive and systematic manner. -* well maintained, lots of prebuild state machines to possibly start from +- event-driven, asynchronous, behavioral state machine library +- real-time ROS applications +- written in C++ +- designed to allow programmers to build robot control applications for multicomponent robots, in an intuitive and systematic manner. 
+- well maintained, lots of prebuilt state machines to possibly start from Why not use SMACC: -* might get some time to get back into C++ -* more sophisticated library might need more time to get used to -* awful country music in the back of tutorial videos +- might get some time to get back into C++ +- more sophisticated library might need more time to get used to +- awful country music in the back of tutorial videos -* Tutorials do not run without further debugging which I didn't invest the time to do so +- Tutorials do not run without further debugging which I didn't invest the time to do so ### Markov Chains #### QuantEcon -* a economics library for implementing Markov chains -* more focussed on simulation than actually using it in an AD agent -* maybe usable for testing and simulating a Markov chain before implementing it +- an economics library for implementing Markov chains +- more focussed on simulation than actually using it in an AD agent +- maybe usable for testing and simulating a Markov chain before implementing it #### markov_decision_making -* ROS library for robot decision-making based on Markov Decision Problems -* written in C++ -* callback-based action interpretation allows to use other frameworks (SMACH) -* relatively easy to implement hierarchical MDPs -* supports synchronous and asynchronous execution +- ROS library for robot decision-making based on Markov Decision Problems +- written in C++ +- callback-based action interpretation allows to use other frameworks (SMACH) +- relatively easy to implement hierarchical MDPs +- supports synchronous and asynchronous execution Why not use markov_decision_making: -* not maintained -* only works with ROS hydro +- not maintained +- only works with ROS hydro ### Decision trees #### pytrees -* easy framework for implementing behaviour trees -* written in python -* used by a group two years ago -* not usable for real-time application code according to their docs -* priority handling - higher level interrupts
are handled first +- easy framework for implementing behaviour trees +- written in python +- used by a group two years ago +- not usable for real-time application code according to their docs +- priority handling - higher level interrupts are handled first ## Conclusion diff --git a/doc/03_research/03_planning/00_paf22/05_Navigation_Data.md b/doc/03_research/03_planning/00_paf22/05_Navigation_Data.md index 2d9047ca..18611513 100644 --- a/doc/03_research/03_planning/00_paf22/05_Navigation_Data.md +++ b/doc/03_research/03_planning/00_paf22/05_Navigation_Data.md @@ -14,13 +14,13 @@ Niklas Vogel --- -* [Navigation Data Research](#navigation-data-research) - * [Author](#author) - * [Date](#date) - * [How to receive navigation data](#how-to-receive-navigation-data) - * [Structure of navigation data](#structure-of-navigation-data) - * [Visualisation of received navigation data](#visualisation-of-received-navigation-data) -* [Sources](#sources) +- [Navigation Data Research](#navigation-data-research) + - [Author](#author) + - [Date](#date) + - [How to receive navigation data](#how-to-receive-navigation-data) + - [Structure of navigation data](#structure-of-navigation-data) + - [Visualisation of received navigation data](#visualisation-of-received-navigation-data) + - [Sources](#sources) ## How to receive navigation data @@ -58,15 +58,15 @@ Therefore, the Map is published as topic ``/carla/hero/OpenDrive`` in [OpenDRIVE The route is published in the following topics: -* ``/carla/hero/global_plan`` ([carla_msgs/CarlaRoute](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaRoute.msg)) -* ``/carla/hero/global_plan_gnss`` ([carla_msgs/CarlaGnnsRoute](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaGnssRoute.msg)) +- ``/carla/hero/global_plan`` ([carla_msgs/CarlaRoute](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaRoute.msg)) +- ``/carla/hero/global_plan_gnss`` 
([carla_msgs/CarlaGnnsRoute](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaGnssRoute.msg)) ## Structure of navigation data Routes consist of tuples of a position and a high level route instruction command which should be taken at that point. Positions are either given as GPS coordinates or as world coordinates: -* GPS coordinates: +- GPS coordinates: ```yaml [({'z': 0.0, 'lat': 48.99822669411668, 'lon': 8.002271601998707}, RoadOption.LEFT), @@ -75,7 +75,7 @@ Positions are either given as GPS coordinates or as world coordinates: ({'z': 0.0, 'lat': 48.99822679980298, 'lon': 8.002735250105061}, RoadOption.STRAIGHT)] ``` -* World coordinates: +- World coordinates: ```yaml [({'x': 153.7, 'y': 15.6, 'z': 0.0}, RoadOption.LEFT), @@ -84,14 +84,14 @@ Positions are either given as GPS coordinates or as world coordinates: ({'x': 180.7, 'y': 45.1, 'z': 1.2}, RoadOption.STRAIGHT)] ``` -* High-level route instruction commands (road options): +- High-level route instruction commands (road options): - * RoadOption.**CHANGELANELEFT**: Move one lane to the left. - * RoadOption.**CHANGELANERIGHT**: Move one lane to the right. - * RoadOption.**LANEFOLLOW**: Continue in the current lane. - * RoadOption.**LEFT**: Turn left at the intersection. - * RoadOption.**RIGHT**: Turn right at the intersection. - * RoadOption.**STRAIGHT**: Keep straight at the intersection. + - RoadOption.**CHANGELANELEFT**: Move one lane to the left. + - RoadOption.**CHANGELANERIGHT**: Move one lane to the right. + - RoadOption.**LANEFOLLOW**: Continue in the current lane. + - RoadOption.**LEFT**: Turn left at the intersection. + - RoadOption.**RIGHT**: Turn right at the intersection. + - RoadOption.**STRAIGHT**: Keep straight at the intersection. **Important:** Distance between route points can be up to hundreds of meters. @@ -103,7 +103,7 @@ WIP notes from team intern meeting: -* leaderboard evaluation visualisiert die route und scenarien evtl schon... 
evtl wert genauer zu betrachten +- leaderboard evaluation visualisiert die route und scenarien evtl schon... evtl wert genauer zu betrachten ### Sources diff --git a/doc/03_research/03_planning/00_paf22/06_state_machine_design.md b/doc/03_research/03_planning/00_paf22/06_state_machine_design.md index e90a785f..ad57715d 100644 --- a/doc/03_research/03_planning/00_paf22/06_state_machine_design.md +++ b/doc/03_research/03_planning/00_paf22/06_state_machine_design.md @@ -14,33 +14,32 @@ Josef Kircher --- -* [Title of wiki page](#title-of-wiki-page) - * [Author](#author) - * [Date](#date) - * [Super state machine](#super-state-machine) - * [Driving state machine](#driving-state-machine) - * [KEEP](#keep) - * [ACCEL](#accel) - * [Brake](#brake) - * [Lane change state machine](#lane-change-state-machine) - * [DECIDE_LANE_CHANGE](#decidelanechange) - * [CHANGE_LANE_LEFT](#changelaneleft) - * [CHANGE_LANE_RIGHT](#changelaneright) - * [Intersection state machine](#intersection-state-machine) - * [APPROACH_INTERSECTION](#approachintersection) - * [IN_INTERSECTION](#inintersection) - * [TURN_LEFT](#turnleft) - * [STRAIGHT](#straight) - * [TURN_RIGHT](#turnright) - * [LEAVE_INTERSECTION](#leaveintersection) - * [Stop sign/traffic light state machine](#stop-signtraffic-light-state-machine) - * [STOP_NEAR](#stopnear) - * [STOP_SLOW_DOWN](#stopslowdown) - * [STOP_WILL_STOP](#stopwillstop) - * [STOP_WAIT](#stopwait) - * [STOP_GO](#stopgo) - * [Implementation](#implementation) - * [Sources](#sources) +- [State machine design](#state-machine-design) + - [Author](#author) + - [Date](#date) + - [Super state machine](#super-state-machine) + - [Driving state machine](#driving-state-machine) + - [KEEP](#keep) + - [UPDATE\_TARGET\_SPEED](#update_target_speed) + - [Lane change state machine](#lane-change-state-machine) + - [DECIDE\_LANE\_CHANGE](#decide_lane_change) + - [CHANGE\_LANE\_LEFT](#change_lane_left) + - [CHANGE\_LANE\_RIGHT](#change_lane_right) + - [Intersection state 
machine](#intersection-state-machine) + - [APPROACH\_INTERSECTION](#approach_intersection) + - [IN\_INTERSECTION](#in_intersection) + - [TURN\_LEFT](#turn_left) + - [STRAIGHT](#straight) + - [TURN\_RIGHT](#turn_right) + - [LEAVE\_INTERSECTION](#leave_intersection) + - [Stop sign/traffic light state machine](#stop-signtraffic-light-state-machine) + - [STOP\_NEAR](#stop_near) + - [STOP\_SLOW\_DOWN](#stop_slow_down) + - [STOP\_WILL\_STOP](#stop_will_stop) + - [STOP\_WAIT](#stop_wait) + - [STOP\_GO](#stop_go) + - [Implementation](#implementation) + - [Sources](#sources) ## Super state machine @@ -51,9 +50,9 @@ The super state machine functions as a controller of the main functions of the a Those functions are -* following the road and brake in front of obstacles if needed -* drive across an intersection -* change lane +- following the road and brake in front of obstacles if needed +- drive across an intersection +- change lane ## Driving state machine @@ -61,8 +60,8 @@ Those functions are Transition: -* From `Intersection state machine` -* From `Lane change state machine` +- From `Intersection state machine` +- From `Lane change state machine` This state machine controls the speed of the ego-vehicle. It either tells the acting part of the ego vehicle to `UPDATE_TARGET_SPEED` or `KEEP` the velocity. @@ -74,7 +73,7 @@ If there is an event requiring the ego-vehicle to change the lane as mentioned i Transition: -* From `UPDATE_TARGET_SPEED` +- From `UPDATE_TARGET_SPEED` Keep the current target speed, applied most of the time. From here changes to the `UPDATE_TARGET_SPEED` state are performed, if events require a change of `target_speed`. @@ -82,7 +81,7 @@ Keep the current target speed, applied most of the time. From here changes to th Transition: -* From `KEEP` if `new target_speed` is smaller or greater than current `target_speed` or an `obstacle` or the `leading_vehicle` is in braking distance. 
+- From `KEEP` if `new target_speed` is smaller or greater than current `target_speed` or an `obstacle` or the `leading_vehicle` is in braking distance. Set a new target speed and change back to `KEEP` state afterwards. @@ -92,26 +91,26 @@ Set a new target speed and change back to `KEEP` state afterwards. Transition: -* From `driving state machine` by `lane_change_requested` +- From `driving state machine` by `lane_change_requested` This state machine completes the change of a lane. This is triggered from the super state machine and can have multiple triggers. Those include: -* Join highway -* Leave highway -* RoadOption: - * CHANGELANELEFT - * CHANGELANERIGHT - * KEEPLANE -* avoid obstacle(doors, static objects) -* give way to emergency vehicle -* overtake slow moving vehicle -* leave a parking bay +- Join highway +- Leave highway +- RoadOption: + - CHANGELANELEFT + - CHANGELANERIGHT + - KEEPLANE +- avoid obstacle(doors, static objects) +- give way to emergency vehicle +- overtake slow moving vehicle +- leave a parking bay ### DECIDE_LANE_CHANGE Transition: -* From super state machine by above triggers +- From super state machine by above triggers From the super state machine the transition to change the lane is given by one of the above triggers. This state decides to which lane should be changed dependent on the trigger. It takes into account if there are lanes to the left and/or right and if the lane change is requested by a roadOption command. @@ -120,7 +119,7 @@ It takes into account if there are lanes to the left and/or right and if the lan Transition: -* From `DECIDE_LANE_CHANGE` by `RoadOption.CHANGELANELEFT` or `obstacle_in_lane` or `leader_vehicle_speed < LEADERTHRESHOLD` +- From `DECIDE_LANE_CHANGE` by `RoadOption.CHANGELANELEFT` or `obstacle_in_lane` or `leader_vehicle_speed < LEADERTHRESHOLD` This state performs a lane change to the lane on the left. 
@@ -134,8 +133,8 @@ If an obstacle or a slow leading vehicle are the reasons for the lane change, to Transition: -* From `DECIDE_LANE_CHANGE` by `RoadOption.CHANGELANERIGHT` or `emergency_vehicle_in_front` -* From `CHANGE_LANE_LEFT` by `passing_obstacle` or `slow_leading_vehicle` +- From `DECIDE_LANE_CHANGE` by `RoadOption.CHANGELANERIGHT` or `emergency_vehicle_in_front` +- From `CHANGE_LANE_LEFT` by `passing_obstacle` or `slow_leading_vehicle` For changing to the right lane it is assumed, that the traffic in this lane flows in the driving direction of the ego vehicle. @@ -147,7 +146,7 @@ The lane change should be performed if the lane is free and there are no fast mo Transition: -* From `driving state machine` by `intersection_detected` +- From `driving state machine` by `intersection_detected` This state machine handles the passing of an intersection. @@ -163,8 +162,8 @@ If there are is a traffic light or a stop sign at the intersection change to the Transition: -* From `STOP_SIGN/TRAFFIC SM` by `clearing the traffic light, stop sign` -* From `APPROACH_INTERSECTION` by `detecting an unsignalized and cleared intersection` +- From `STOP_SIGN/TRAFFIC SM` by `clearing the traffic light, stop sign` +- From `APPROACH_INTERSECTION` by `detecting an unsignalized and cleared intersection` After the approach of the intersection and clear a possible traffic light/stop sign, the ego vehicle enters the intersection. @@ -174,7 +173,7 @@ From there the RoadOption decides in which direction the ego vehicle should turn Transition: -* From `IN_INTERSECTION` by `RoadOption.LEFT` +- From `IN_INTERSECTION` by `RoadOption.LEFT` Check for pedestrians on the driving path. If the path is clear of pedestrians, make sure there will be no crashes during the turning process with oncoming traffic. @@ -182,7 +181,7 @@ Check for pedestrians on the driving path. 
If the path is clear of pedestrians, Transition: -* From `IN_INTERSECTION` by `RoadOption.STRAIGHT` +- From `IN_INTERSECTION` by `RoadOption.STRAIGHT` Check if there is a vehicle running a red light in the intersection. Pass the intersection. @@ -190,7 +189,7 @@ Check if there is a vehicle running a red light in the intersection. Pass the in Transition: -* From `IN_INTERSECTION` by `RoadOption.RIGHT` +- From `IN_INTERSECTION` by `RoadOption.RIGHT` Check for pedestrians on the driving path. If the path is clear of pedestrians, make sure there will be no crashes during the turning process with crossing traffic. @@ -198,7 +197,7 @@ Check for pedestrians on the driving path. If the path is clear of pedestrians, Transition: -* From `TURN_RIGHT`, `STRAIGHT` or `TURN_LEFT` by passing a distance from the intersection. +- From `TURN_RIGHT`, `STRAIGHT` or `TURN_LEFT` by passing a distance from the intersection. ## Stop sign/traffic light state machine @@ -206,7 +205,7 @@ Transition: Transition: -* From `APPROACH_INTERSECTION` by `stop_sign_detected or traffic_light_detected` +- From `APPROACH_INTERSECTION` by `stop_sign_detected or traffic_light_detected` This state machine handles the handling of stop signs and traffic lights. @@ -218,7 +217,7 @@ If the traffic light/stop sign is near, reduce speed. Avoid crashes with slowly Transitions: -* From `STOP_NEAR` if `distance greater braking distance`. +- From `STOP_NEAR` if `distance greater braking distance`. Slow down near the traffic light to be able to react to quick changes. @@ -226,10 +225,10 @@ Slow down near the traffic light to be able to react to quick changes. Transition: -* From `STOP_NEAR` if `distance < braking distance` while sensing a traffic_light that is `red` or `yellow` or a `stop sign` -* From `STOP_SLOW_DOWN` if `distance < braking distance` -* From `STOP_GO` if the traffic light changes from `green` to `yellow` or `red` and the ego vehicle can stop in front of the stop sign/traffic light. 
-* From `STOP_WAIT` if the there is a predominant stop sign and the ego vehicle didn't reach the stop line. +- From `STOP_NEAR` if `distance < braking distance` while sensing a traffic_light that is `red` or `yellow` or a `stop sign` +- From `STOP_SLOW_DOWN` if `distance < braking distance` +- From `STOP_GO` if the traffic light changes from `green` to `yellow` or `red` and the ego vehicle can stop in front of the stop sign/traffic light. +- From `STOP_WAIT` if there is a predominant stop sign and the ego vehicle didn't reach the stop line. Stop in front of the traffic light or the stop sign. @@ -237,7 +236,7 @@ Stop in front of the traffic light or the stop sign. #### STOP_WAIT Transition: -* From `STOP_WILL_STOP` by either vehicle has stopped or distance to stop line is less than 2 meters +- From `STOP_WILL_STOP` by either vehicle has stopped or distance to stop line is less than 2 meters The vehicle has stopped and waits eiter until leading vehicle continues to drive or traffic rules permit to continue driving. @@ -245,9 +244,9 @@ The vehicle has stopped and waits eiter until leading vehicle continues to drive Transition: -* From `STOP_NEAR` if traffic light is `green` or `off` -* From `STOP_SLOW_DOWN` if traffic light is `green` or `off` -* FROM `STOP_WAIT` if traffic light is `green` or `off` +- From `STOP_NEAR` if traffic light is `green` or `off` +- From `STOP_SLOW_DOWN` if traffic light is `green` or `off` +- From `STOP_WAIT` if traffic light is `green` or `off` Ego vehicle starts to accelerate to clear the traffic sign/traffic light or continues to drive if the traffic light is green or deactivated. 
diff --git a/doc/03_research/03_planning/00_paf22/07_OpenDrive.md b/doc/03_research/03_planning/00_paf22/07_OpenDrive.md index 7c5c46fc..e25b8b60 100644 --- a/doc/03_research/03_planning/00_paf22/07_OpenDrive.md +++ b/doc/03_research/03_planning/00_paf22/07_OpenDrive.md @@ -15,21 +15,21 @@ Simon Erlbacher --- -* [OpenDrive Format](#opendrive-format) - * [Authors](#authors) - * [Date](#date) - * [General](#general) - * [Different Projects](#different-projects) - * [PSAF1](#psaf1) - * [PSAF2](#psaf2) - * [paf21-2](#paf21-2) - * [paf21-1](#paf21-1) - * [Result](#result) - * [More information about OpenDrive](#more-information-about-opendrive) - * [Start of the implementation](#start-of-the-implementation) - * [Implementation details](#implementation-details) - * [Follow-up Issues](#follow-up-issues) - * [Sources](#sources) +- [OpenDrive Format](#opendrive-format) + - [Authors](#authors) + - [Date](#date) + - [General](#general) + - [Different Projects](#different-projects) + - [PSAF1](#psaf1) + - [PSAF2](#psaf2) + - [paf21-2](#paf21-2) + - [paf21-1](#paf21-1) + - [Result](#result) + - [More information about OpenDrive](#more-information-about-opendrive) + - [Start of the implementation](#start-of-the-implementation) + - [Implementation details](#implementation-details) + - [Follow-up Issues](#follow-up-issues) + - [Sources](#sources) ## General @@ -45,33 +45,33 @@ It is examined how the OpenDrive file is converted and read in other groups and ### PSAF1 -* Subscribed the OpenDrive information from the Carla Simulator -* Used the Commonroad Route Planner from TUM (in the project they used the now deprecated verison) -* This Route Planner converts the xdor file from the CarlaWorldInfo message automatically -* As a result they used a Lanelet model, which they enriched with additional information about +- Subscribed the OpenDrive information from the Carla Simulator +- Used the Commonroad Route Planner from TUM (in the project they used the now deprecated verison) +- This 
Route Planner converts the xdor file from the CarlaWorldInfo message automatically +- As a result they used a Lanelet model, which they enriched with additional information about traffic lights and traffic signs -* This additional information comes from the Carla Simulator API +- This additional information comes from the Carla Simulator API Result: We can't use this information from [psaf1]("https://github.com/ll7/psaf1/tree/master/psaf_ros/psaf_global_planner") , because it is not allowed to use privileged information from the Carla Simulator ### PSAF2 -* Same approach as described in PSAF1 above -* Same problem in [psaf2](https://github.com/ll7/psaf2/tree/main/Planning/global_planner) with this approach as +- Same approach as described in PSAF1 above +- Same problem in [psaf2](https://github.com/ll7/psaf2/tree/main/Planning/global_planner) with this approach as mentioned in PSAF1 ### paf21-2 -* Same approach as described in PSAF1 above -* Same problem in [paf21-2](https://github.com/ll7/paf21-2#global-planner) with this approach as mentioned in PSAF1 +- Same approach as described in PSAF1 above +- Same problem in [paf21-2](https://github.com/ll7/paf21-2#global-planner) with this approach as mentioned in PSAF1 ### paf21-1 -* Worked directly with the OpenDrive format -* There is a lot of information available -* They extracted some information from the xdor file to plan their trajectory -* They don't recommend to use this approach, because a lot of "black magic" is happening in their code +- Worked directly with the OpenDrive format +- There is a lot of information available +- They extracted some information from the xdor file to plan their trajectory +- They don't recommend to use this approach, because a lot of "black magic" is happening in their code Result: The only possible way to get all the road information without using the Carla Simulator API @@ -83,17 +83,17 @@ during the planning process. 
It would be better to convert and analyse the xdor ## More information about OpenDrive -* We can read the xdor file with the [ElementTree XML API](https://docs.python.org/3/library/xml.etree.elementtree.html) -* We can refactor the scripts from paf21-1 but as they described, it is a lot of code and hard to get a good +- We can read the xdor file with the [ElementTree XML API](https://docs.python.org/3/library/xml.etree.elementtree.html) +- We can refactor the scripts from paf21-1 but as they described, it is a lot of code and hard to get a good overview about it -* Also we have a different scenario, because we do not need to read the whole xdor file in the beginning. We need +- Also we have a different scenario, because we do not need to read the whole xdor file in the beginning. We need to search for the relevant area -* The OpenDrive format contains a lot of information to extract - * Every road section has a unique id - * Road has a predecessor and a successor with its specific type (road, junction,...) - * Information about signals and their position - * Information about the reference lines (line which seperates lanes) and their layout (linear, arc, cubic curves) - * Information about the maximum speed +- The OpenDrive format contains a lot of information to extract + - Every road section has a unique id + - Road has a predecessor and a successor with its specific type (road, junction,...) 
+ - Information about signals and their position + - Information about the reference lines (line which seperates lanes) and their layout (linear, arc, cubic curves) + - Information about the maximum speed ![OpenDrive stop sign](../../00_assets/Stop_sign_OpenDrive.png) Impression of the format @@ -108,22 +108,22 @@ After that, we can add some more information about the signals to our trajectory structure of the xodr files from the Simulator: -* header -* road (attributes: junction id (-1 if no junction), length, road id, Road name) - * lanes - * link (predecessor and successor with id) - * signals - * type (contains max speed) - * planView (contains information about the geometry and the line type (= reference line)) -* controller (information about the controlled signals) -* junction (crossing lane sections) +- header +- road (attributes: junction id (-1 if no junction), length, road id, Road name) + - lanes + - link (predecessor and successor with id) + - signals + - type (contains max speed) + - planView (contains information about the geometry and the line type (= reference line)) +- controller (information about the controlled signals) +- junction (crossing lane sections) link: -* every road has a successor and a predecessor road (sometimes only one of them) -* the road can have the type "road" or "junction" -* we can access the relevant sections with an id value -* Example: +- every road has a successor and a predecessor road (sometimes only one of them) +- the road can have the type "road" or "junction" +- we can access the relevant sections with an id value +- Example: @@ -132,10 +132,10 @@ link: planView: -* x and y world coordinates (startposition of the reference line) -* hdg value for the orientation -* length value for the length of this road section (reference line) -* reference line type: line, curvature (more possible in Asam OpenDrive) +- x and y world coordinates (startposition of the reference line) +- hdg value for the orientation +- length value 
for the length of this road section (reference line) +- reference line type: line, curvature (more possible in Asam OpenDrive) @@ -151,18 +151,18 @@ planView: lane: -* a lane is part of a road -* road can consists of different lanes -* the lane next to the reference line has the value 1 -* the lanes next to that lane have increasing numbers -* lanes on the left and on the right side of the reference line have different signs +- a lane is part of a road +- road can consists of different lanes +- the lane next to the reference line has the value 1 +- the lanes next to that lane have increasing numbers +- lanes on the left and on the right side of the reference line have different signs junction: -* a road section with crossing lanes -* a junction has one id -* every segment in the junction connects different lanes -* every connection has its own id +- a road section with crossing lanes +- a junction has one id +- every segment in the junction connects different lanes +- every connection has its own id @@ -176,16 +176,16 @@ junction: Relevant coordinate system: -* inertial coordinate system - * x -> right (roll) - * y -> up (pitch) - * z -> coming out of the drawig plane (yaw) +- inertial coordinate system + - x -> right (roll) + - y -> up (pitch) + - z -> coming out of the drawig plane (yaw) Driving direction: -* calculate on which road the agent drives -* that has an impact on the way we have to calculate the end points -* A road is decribed through the reference line. Every road segment has a +- calculate on which road the agent drives +- that has an impact on the way we have to calculate the end points +- A road is decribed through the reference line. Every road segment has a starting point and a length value. The distance to the following road segment. The calculation of the trajectory uses the startpoint of the next road segment to navigate along the street. 
If the agent drives on the other side of the street, @@ -198,25 +198,25 @@ the start points of the reference line There are two methods to calculate the trajectory. The first method is only needed once at the beginning, when the ego-vehicle stays at its start position. -* First we need to find the current road, where the agent is located -* Take all road start points and calculate the nearest startpoint to the vehicle position -* Calculate Endpoint for each connecting road and check if the vehicle lays in the interval -> road id - * use the predecessor and the successor points to get the correct road - * also check if the predecessor or successor is a junction. If do not have a command from the leaderboard we pass +- First we need to find the current road, where the agent is located +- Take all road start points and calculate the nearest startpoint to the vehicle position +- Calculate Endpoint for each connecting road and check if the vehicle lays in the interval -> road id + - use the predecessor and the successor points to get the correct road + - also check if the predecessor or successor is a junction. If do not have a command from the leaderboard we pass the junction straight. 
For this scenario we first have to filter the correct road id out ouf the junction to get the start and endpoint - * check if the ego vehicle lays in the interval -> if yes change the road id (else we chose the correct one) -* Check the driving direction (following road id) - * calculate the distances from one predecessor point and one successor point to the target point - * the road with the smaller distance is the next following road -* Interpolate the current road from start to end (arc and line) - * check the point ordering -> possible that we have to reverse them - * at the beginning we can be located in the middle of a street - * we need to delete the points from the interpolation laying before our ego vehicle position -* Weakness - * The Calculation of the driving direction is based on the distance to the target location - * If the course of the road is difficult, this approach could fail - * As you can see in the top right corner of the picture. the distance from the lower blue line + - check if the ego vehicle lays in the interval -> if yes change the road id (else we chose the correct one) +- Check the driving direction (following road id) + - calculate the distances from one predecessor point and one successor point to the target point + - the road with the smaller distance is the next following road +- Interpolate the current road from start to end (arc and line) + - check the point ordering -> possible that we have to reverse them + - at the beginning we can be located in the middle of a street + - we need to delete the points from the interpolation laying before our ego vehicle position +- Weakness + - The Calculation of the driving direction is based on the distance to the target location + - If the course of the road is difficult, this approach could fail + - As you can see in the top right corner of the picture. the distance from the lower blue line is shorter to the target than the upper blue line. 
The method would choose the lower line because of the smaller distance @@ -226,45 +226,45 @@ Road Concepts Further Calculation of the trajectory -* after each interpolation we calculate the midpoint of a lane. Otherwise we would drive on +- after each interpolation we calculate the midpoint of a lane. Otherwise we would drive on the reference line. That is why we have to filter the width information for our lanes. - * there can be more than one driving lane on one side of the reference line - * filter all width values and decide on which side of the reference line the vehicle drives - * after this we have the information which of the two perpendicular vectors we need to compute + - there can be more than one driving lane on one side of the reference line + - filter all width values and decide on which side of the reference line the vehicle drives + - after this we have the information which of the two perpendicular vectors we need to compute the points on the correct side of the reference line - * we always choose the biggest width value, to take the rightmost lane + - we always choose the biggest width value, to take the rightmost lane ![lane_midpoint](../../00_assets/lane_midpoint.png) Scenario and concept to compute the midpoint of a lane -* the second method takes the target position and the next command from the leaderboard -* we always calculate the follow road based on the distance to the target and then +- the second method takes the target position and the next command from the leaderboard +- we always calculate the follow road based on the distance to the target and then interpolate the current road - * here we can also change this approach if there is the same weakness as mentioned before - * we can calculate the next road based on the distance to the last trajectory point -* we have to keep in mind the same aspects as in the starting case -* after each interpolation of a road we check the distance from the new trajectory points to + - here we can also 
change this approach if there is the same weakness as mentioned before + - we can calculate the next road based on the distance to the last trajectory point +- we have to keep in mind the same aspects as in the starting case +- after each interpolation of a road we check the distance from the new trajectory points to the target position - * if the distance is smaller than a set threshold, we reached the target - * in this case we may need to calculate this last road again because based on the command + - if the distance is smaller than a set threshold, we reached the target + - in this case we may need to calculate this last road again because based on the command from the leaderboard we have to turn to the left side or the rigth side. We need to change the lane before we reach the startpoint of a junction - * we calculate the next road to take, based on the heading value of the endpoint of this + - we calculate the next road to take, based on the heading value of the endpoint of this following road. We compare this value to the yaw value from the leaderboard. The heading value with the smallest distance indicates the correct following road id. - * when we know the end point of the following road, we can recompute the last trajectory point + - when we know the end point of the following road, we can recompute the last trajectory point with all possible width values for this road. calculate the distance to the following endpoint and chose the width value with the smallest distance. 
- * Now we can interpolate our last road with the new width value (if the width value was updated) - * Also we can smooth our first trajectory points with smaller width values, to change the lane smooth + - Now we can interpolate our last road with the new width value (if the width value was updated) + - Also we can smooth our first trajectory points with smaller width values, to change the lane smoothly For the next target point and command we need to call this method again (not the starting method) and calculate the trajectory. Weakness -* Offset for restricted areas is not yet calculated (see the picture above) -* no max speed value for junctions -> default value -* Check where the target points are located. In the middle of a junction or before? +- Offset for restricted areas is not yet calculated (see the picture above) +- no max speed value for junctions -> default value +- Check where the target points are located. In the middle of a junction or before? At the moment we assume they are before a junction. In the following test scenario we added a manual start point on road 8. 
@@ -288,15 +288,15 @@ One cutout of the trajectory ## Follow-up Issues -* Check out positioning - * Compare positioning of signs in Carla and in the OpenDrive Map - * Compare positioning of traffic lights in Carla and in the OpenDrive Map -* Visualize Trajectory in Carla -* Implement velocity profile -* Check if waypoints fit with Simulator -* Keep the lane limitation -> testing -* Extract signals information for the state machine -* Implement local path planner for alternative routes and collision prediction +- Check out positioning + - Compare positioning of signs in Carla and in the OpenDrive Map + - Compare positioning of traffic lights in Carla and in the OpenDrive Map +- Visualize Trajectory in Carla +- Implement velocity profile +- Check if waypoints fit with Simulator +- Keep the lane limitation -> testing +- Extract signals information for the state machine +- Implement local path planner for alternative routes and collision prediction ## Sources diff --git a/doc/03_research/03_planning/00_paf22/07_reevaluation_desicion_making.md b/doc/03_research/03_planning/00_paf22/07_reevaluation_desicion_making.md index a3fe7fb5..f6492d3c 100644 --- a/doc/03_research/03_planning/00_paf22/07_reevaluation_desicion_making.md +++ b/doc/03_research/03_planning/00_paf22/07_reevaluation_desicion_making.md @@ -16,28 +16,27 @@ Josef Kircher --- -* [Re-evaluation of decision making component](#re-evaluation-of-decision-making-component) - * [**Summary:** This page gives a foundation for the re-evaluation of the decision-making](#summary-this-page-gives-a-foundation-for-the-re-evaluation-of-the-decision-making) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [Reasons for re-evaluation](#reasons-for-re-evaluation) - * [Options](#options) - * [Pylot](#pylot) - * [Pytrees](#pytrees) - * [Pros](#pros) - * [Cons](#cons) - * [Conclusion](#conclusion) - * [Sources](#sources) +- [Re-evaluation of decision making 
component](#re-evaluation-of-decision-making-component) + - [Author](#author) + - [Date](#date) + - [Prerequisite](#prerequisite) + - [Reasons for re-evaluation](#reasons-for-re-evaluation) + - [Options](#options) + - [Pylot](#pylot) + - [Pytrees](#pytrees) + - [Pros](#pros) + - [Cons](#cons) + - [Conclusion](#conclusion) + - [Sources](#sources) ## Reasons for re-evaluation In the last sprint, I tried to get a graphic tool to work with the docker container withing the project. That failed, but I still think, that a graphical representation would be helpful. Other reasons are: -* not much time has been allocated for the state machine so far -* using SMACH would result in a mostly from scratch implementation -* harder to debug due to the lack of a graphic representation +- not much time has been allocated for the state machine so far +- using SMACH would result in a mostly from scratch implementation +- harder to debug due to the lack of a graphic representation ## Options @@ -56,18 +55,18 @@ As it is looking very promising, I list here a few arguments to help support my #### Pros -* support a graphical representation at runtime with rqt -* a lot of similar driving scenarios as the old team -* so a lot of code can be recycled -* quite intuitive and easy to understand -* only a limited amount of commands (easy to learn) -* well documented -* maintained +- support a graphical representation at runtime with rqt +- a lot of similar driving scenarios as the old team +- so a lot of code can be recycled +- quite intuitive and easy to understand +- only a limited amount of commands (easy to learn) +- well documented +- maintained #### Cons -* only a couple of decision can be made inside the tree, so it might be more complicated to depict the complex behaviour of the ego vehicle -* A lot of time was invested in the design of the original state machine, might be needed to be adapted +- only a couple of decision can be made inside the tree, so it might be more complicated to 
depict the complex behaviour of the ego vehicle +- A lot of time was invested in the design of the original state machine, might be needed to be adapted ## Conclusion diff --git a/doc/03_research/03_planning/Readme.md b/doc/03_research/03_planning/Readme.md index e48c2530..67c5f196 100644 --- a/doc/03_research/03_planning/Readme.md +++ b/doc/03_research/03_planning/Readme.md @@ -3,5 +3,5 @@ This folder contains all the results of research on planning from PAF 23 and 22. The research documents from the previous project were kept as they contain helpful information. The documents are separated in different folders: -* **[PAF22](./00_paf22/)** -* **[PAF23](./00_paf23/)** +- **[PAF22](./00_paf22/)** +- **[PAF23](./00_paf23/)** diff --git a/doc/03_research/04_requirements/02_informations_from_leaderboard.md b/doc/03_research/04_requirements/02_informations_from_leaderboard.md index 137d3566..25ce6b78 100644 --- a/doc/03_research/04_requirements/02_informations_from_leaderboard.md +++ b/doc/03_research/04_requirements/02_informations_from_leaderboard.md @@ -19,52 +19,52 @@ none --- -* [Requirements of Carla Leaderboard](#requirements-of-carla-leaderboard) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [Task](#task) - * [Participation modalities](#participation-modalities) - * [Route format](#route-format) - * [Sensors](#sensors) - * [Evaluation](#evaluation) - * [Main score](#main-score) - * [Driving Score for route i](#driving-score-for-route-i) - * [Infraction penalty](#infraction-penalty) - * [Shutdown criteria](#shutdown-criteria) - * [Submission](#submission) - * [Sources](#sources) +- [Requirements of Carla Leaderboard](#requirements-of-carla-leaderboard) + - [Author](#author) + - [Date](#date) + - [Prerequisite](#prerequisite) + - [Task](#task) + - [Participation modalities](#participation-modalities) + - [Route format](#route-format) + - [Sensors](#sensors) + - [Evaluation](#evaluation) + - [Main score](#main-score) + - [Driving 
score for single route](#driving-score-for-single-route) + - [Infraction penalty](#infraction-penalty) + - [Shutdown criteria](#shutdown-criteria) + - [Submission](#submission) + - [Sources](#sources) --- ## Task -* an autonomous agent should drive through a set of predefined routes -* for each route: - * initialization at a starting point - * directed to drive to a destination point - * route described by GPS coordinates **or** map coordinates **or** route instructions -* route situations: - * freeways - * urban areas - * residential districts - * rural settings -* weather conditions: - * daylight - * sunset - * rain - * fog - * night - * more ... +- an autonomous agent should drive through a set of predefined routes +- for each route: + - initialization at a starting point + - directed to drive to a destination point + - route described by GPS coordinates **or** map coordinates **or** route instructions +- route situations: + - freeways + - urban areas + - residential districts + - rural settings +- weather conditions: + - daylight + - sunset + - rain + - fog + - night + - more ... Possible traffic signs (not complete): -* Stop sign -* Speed limitation -* Traffic lights -* Arrows on street -* Stop sign on street +- Stop sign +- Speed limitation +- Traffic lights +- Arrows on street +- Stop sign on street ## Participation modalities @@ -100,12 +100,12 @@ Second, world coordinates and a route option High-level commands (rood options) are: -* RoadOption.**CHANGELANELEFT**: Move one lane to the left. -* RoadOption.**CHANGELANERIGHT**: Move one lane to the right. -* RoadOption.**LANEFOLLOW**: Continue in the current lane. -* RoadOption.**LEFT**: Turn left at the intersection. -* RoadOption.**RIGHT**: Turn right at the intersection. -* RoadOption.**STRAIGHT**: Keep straight at the intersection. +- RoadOption.**CHANGELANELEFT**: Move one lane to the left. +- RoadOption.**CHANGELANERIGHT**: Move one lane to the right. 
+- RoadOption.**LANEFOLLOW**: Continue in the current lane. +- RoadOption.**LEFT**: Turn left at the intersection. +- RoadOption.**RIGHT**: Turn right at the intersection. +- RoadOption.**STRAIGHT**: Keep straight at the intersection. **Important:** If the semantics of left and right are ambiguous, the next position should be used to clarify the path. @@ -131,9 +131,9 @@ Determination how "good" the agent performs on the Leaderboard. The driving proficiency of an agent can be characterized by multiple metrics. -* `Driving score:` Product between route completion and infractions penalty -* `Route completion:` Percentage of the route distance completed by an agent -* `Infraction penalty:` The leaderboard tracks several types of infractions which reduce the score +- `Driving score:` Product between route completion and infractions penalty +- `Route completion:` Percentage of the route distance completed by an agent +- `Infraction penalty:` The leaderboard tracks several types of infractions which reduce the score Every agent starts with a base infraction score of 1.0 at the beginning. @@ -147,36 +147,36 @@ Product of route completion and infraction penalty of this route Not complying with traffic rules will result in a penalty. Multiple penalties can be applied per route. Infractions ordered by severity are: -* collisions with pedestrians: 0.50 -* collisions with other vehicles: 0.60 -* collisions with static elements: 0.65 -* running a red light: 0.70 -* running a stop sign: 0.80 +- collisions with pedestrians: 0.50 +- collisions with other vehicles: 0.60 +- collisions with static elements: 0.65 +- running a red light: 0.70 +- running a stop sign: 0.80 It is possible that the vehicle is stuck in some scenario. 
After a timeout of **4 minutes** the vehicle will be released, however a penalty is applied -* scenario timeout (feature behaviours can block ego vehicle): 0.70 +- scenario timeout (feature behaviours can block ego vehicle): 0.70 Agent should keep a minimum speed compared to the nearby traffic. The penalty is increases with the difference in speed. -* Failure to maintain minimum speed: 0.70 +- Failure to maintain minimum speed: 0.70 Agent should let emergency vehicles from behind pass. -* Failure to yield to emergency vehicle: 0.70 +- Failure to yield to emergency vehicle: 0.70 If the agent drives off-road that percentage does not count towards the road completion -* Off-road driving: not considered towards the computation of the route completion score +- Off-road driving: not considered towards the computation of the route completion score ### Shutdown criteria Some events will interrupt the simulation of that resulting in an incomplete route -* route deviation - more than 30 meters from assigned route -* agent blocked - if agent does not take an action for 180 seconds -* simulation timeout - no client-server communication in 60 seconds -* route timeout - simulation takes too long to finish +- route deviation - more than 30 meters from assigned route +- agent blocked - if agent does not take an action for 180 seconds +- simulation timeout - no client-server communication in 60 seconds +- route timeout - simulation takes too long to finish ## Submission diff --git a/doc/03_research/04_requirements/03_requirements.md b/doc/03_research/04_requirements/03_requirements.md index 9f8755ab..953bd900 100644 --- a/doc/03_research/04_requirements/03_requirements.md +++ b/doc/03_research/04_requirements/03_requirements.md @@ -16,24 +16,23 @@ Josef Kircher, Simon Erlbacher --- -* [Requirements](#requirements) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [Requirements from Leaderboard tasks](#requirements-from-leaderboard-tasks) - * [Carla 
Leaderboard Score](#carla-leaderboard-score) - * [Prioritized driving aspects](#prioritized-driving-aspects) - * [more Content](#more-content) - * [Sources](#sources) +- [Requirements](#requirements) + - [Author](#author) + - [Date](#date) + - [Prerequisite](#prerequisite) + - [Requirements from Leaderboard tasks](#requirements-from-leaderboard-tasks) + - [Prioritized driving aspects](#prioritized-driving-aspects) + - [more Content](#more-content) + - [Sources](#sources) ## Requirements from Leaderboard tasks -* follow waypoints on a route -* don't deviate from route by more than 30 meters -* act in accordance with traffic rules -* don't get blocked -* complete 10 routes (2 weather conditions) +- follow waypoints on a route +- don't deviate from route by more than 30 meters +- act in accordance with traffic rules +- don't get blocked +- complete 10 routes (2 weather conditions) --- @@ -45,33 +44,33 @@ Also, it is appropriate to implement the basic features of an autonomous car fir `Very important:` -* Recognize the street limitations -* Recognize pedestrians -* Follow the waypoints -* Recognize traffic lights -* Recognize obstacles -* Recognize cars in front of the agent (keep distance) -* Steering, accelerate, decelerate -* Street rules (no street signs available) -* Change lane (obstacles) +- Recognize the street limitations +- Recognize pedestrians +- Follow the waypoints +- Recognize traffic lights +- Recognize obstacles +- Recognize cars in front of the agent (keep distance) +- Steering, accelerate, decelerate +- Street rules (no street signs available) +- Change lane (obstacles) `Important:` -* Check Intersection -* Sense traffic (speed and trajectory) -* Predict traffic -* Emergency brake -* Sense length of ramp -* Recognize space (Turn into highway) -* Change lane (safe) -* Recognize emergency vehicle -* Recognize unexpected dynamic situations (opening door, bycicles,...) 
+- Check Intersection +- Sense traffic (speed and trajectory) +- Predict traffic +- Emergency brake +- Sense length of ramp +- Recognize space (Turn into highway) +- Change lane (safe) +- Recognize emergency vehicle +- Recognize unexpected dynamic situations (opening door, bycicles,...) `Less important:` -* Smooth driving (accelerate, decelerate, stop) -* Weather Condition -* Predict pedestrians +- Smooth driving (accelerate, decelerate, stop) +- Weather Condition +- Predict pedestrians --- diff --git a/doc/03_research/04_requirements/04_use_cases.md b/doc/03_research/04_requirements/04_use_cases.md index 59cd6984..ee58d615 100644 --- a/doc/03_research/04_requirements/04_use_cases.md +++ b/doc/03_research/04_requirements/04_use_cases.md @@ -16,33 +16,163 @@ Josef Kircher --- -* [Use cases in Carla Leaderboard](#use-cases-in-carla-leaderboard) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [1. Control loss due to bad road condition](#1-control-loss-due-to-bad-road-condition) - * [2. Unprotected left turn at intersection with oncoming traffic](#2-unprotected-left-turn-at-intersection-with-oncoming-traffic) - * [3. Right turn at an intersection with crossing traffic](#3-right-turn-at-an-intersection-with-crossing-traffic) - * [4. Crossing negotiation at unsignalized intersection](#4-crossing-negotiation-at-unsignalized-intersection) - * [5. Crossing traffic running a red light at intersection](#5-crossing-traffic-running-a-red-light-at-intersection) - * [6. Highway merge from on-ramp](#6-highway-merge-from-on-ramp) - * [7. Highway cut-in from on-ramp](#7-highway-cut-in-from-on-ramp) - * [8. Static cut-in](#8-static-cut-in) - * [9. Highway exit](#9-highway-exit) - * [10. Yield to emergency vehicle](#10-yield-to-emergency-vehicle) - * [11. Obstacle in lane](#11-obstacle-in-lane) - * [12. Door Obstacle](#12-door-obstacle) - * [13. Slow moving hazard at lane edge](#13-slow-moving-hazard-at-lane-edge) - * [14. 
Vehicle invading lane on bend](#14-vehicle-invading-lane-on-bend) - * [15. Longitudinal control after leading vehicle brakes](#15-longitudinal-control-after-leading-vehicle-brakes) - * [16. Obstacle avoidance without prior action](#16-obstacle-avoidance-without-prior-action) - * [17. Pedestrian emerging from behind parked vehicle](#17-pedestrian-emerging-from-behind-parked-vehicle) - * [18. Obstacle avoidance with prior action](#18-obstacle-avoidance-with-prior-action) - * [19. Parking Cut-in](#19-parking-cut-in) - * [20. Lane changing to evade slow leading vehicle](#20-lane-changing-to-evade-slow-leading-vehicle) - * [21. Passing obstacle with oncoming traffic](#21-passing-obstacle-with-oncoming-traffic) - * [22. Parking Exit](#22-parking-exit) - * [Sources](#sources) +- [Use cases in Carla Leaderboard](#use-cases-in-carla-leaderboard) + - [Author](#author) + - [Date](#date) + - [Prerequisite](#prerequisite) + - [1. Control loss due to bad road condition](#1-control-loss-due-to-bad-road-condition) + - [Description](#description) + - [Pre-condition(Event)](#pre-conditionevent) + - [Driving functions](#driving-functions) + - [Outcome](#outcome) + - [Associated use cases](#associated-use-cases) + - [2. Unprotected left turn at intersection with oncoming traffic](#2-unprotected-left-turn-at-intersection-with-oncoming-traffic) + - [Description](#description-1) + - [Basic flow](#basic-flow) + - [Pre-condition(Event)](#pre-conditionevent-1) + - [Driving functions](#driving-functions-1) + - [Outcome](#outcome-1) + - [Associated use cases](#associated-use-cases-1) + - [3. Right turn at an intersection with crossing traffic](#3-right-turn-at-an-intersection-with-crossing-traffic) + - [Description](#description-2) + - [Basic flow](#basic-flow-1) + - [Pre-condition(Event)](#pre-conditionevent-2) + - [Driving functions](#driving-functions-2) + - [Outcome](#outcome-2) + - [Associated use cases](#associated-use-cases-2) + - [4. 
Crossing negotiation at unsignalized intersection](#4-crossing-negotiation-at-unsignalized-intersection) + - [Description](#description-3) + - [Basic flow](#basic-flow-2) + - [Pre-condition(Event)](#pre-conditionevent-3) + - [Driving functions](#driving-functions-3) + - [Outcome](#outcome-3) + - [Associated use cases](#associated-use-cases-3) + - [5. Crossing traffic running a red light at intersection](#5-crossing-traffic-running-a-red-light-at-intersection) + - [Description](#description-4) + - [Pre-condition(Event)](#pre-conditionevent-4) + - [Driving functions](#driving-functions-4) + - [Outcome](#outcome-4) + - [Associated use cases](#associated-use-cases-4) + - [6. Highway merge from on-ramp](#6-highway-merge-from-on-ramp) + - [Description](#description-5) + - [Basic flow](#basic-flow-3) + - [Pre-condition(Event)](#pre-conditionevent-5) + - [Driving functions](#driving-functions-5) + - [Outcome](#outcome-5) + - [Associated use cases](#associated-use-cases-5) + - [7. Highway cut-in from on-ramp](#7-highway-cut-in-from-on-ramp) + - [Description](#description-6) + - [Basic flow](#basic-flow-4) + - [Pre-condition(Event)](#pre-conditionevent-6) + - [Driving functions](#driving-functions-6) + - [Outcome](#outcome-6) + - [Associated use cases](#associated-use-cases-6) + - [8. Static cut-in](#8-static-cut-in) + - [Description](#description-7) + - [Basic flow](#basic-flow-5) + - [Pre-condition(Event)](#pre-conditionevent-7) + - [Driving functions](#driving-functions-7) + - [Outcome](#outcome-7) + - [Associated use cases](#associated-use-cases-7) + - [9. Highway exit](#9-highway-exit) + - [Description](#description-8) + - [Basic flow](#basic-flow-6) + - [Pre-condition(Event)](#pre-conditionevent-8) + - [Driving functions](#driving-functions-8) + - [Outcome](#outcome-8) + - [Associated use cases](#associated-use-cases-8) + - [10. 
Yield to emergency vehicle](#10-yield-to-emergency-vehicle) + - [Description](#description-9) + - [Basic flow](#basic-flow-7) + - [Pre-condition(Event)](#pre-conditionevent-9) + - [Driving functions](#driving-functions-9) + - [Outcome](#outcome-9) + - [Associated use cases](#associated-use-cases-9) + - [11. Obstacle in lane](#11-obstacle-in-lane) + - [Description](#description-10) + - [Basic flow](#basic-flow-8) + - [Pre-condition(Event)](#pre-conditionevent-10) + - [Driving functions](#driving-functions-10) + - [Outcome](#outcome-10) + - [Associated use cases](#associated-use-cases-10) + - [12. Door Obstacle](#12-door-obstacle) + - [Description](#description-11) + - [Basic flow](#basic-flow-9) + - [Pre-condition(Event)](#pre-conditionevent-11) + - [Driving functions](#driving-functions-11) + - [Outcome](#outcome-11) + - [Associated use cases](#associated-use-cases-11) + - [13. Slow moving hazard at lane edge](#13-slow-moving-hazard-at-lane-edge) + - [Description](#description-12) + - [Basic flow](#basic-flow-10) + - [Pre-condition(Event)](#pre-conditionevent-12) + - [Driving functions](#driving-functions-12) + - [Outcome](#outcome-12) + - [Associated use cases](#associated-use-cases-12) + - [14. Vehicle invading lane on bend](#14-vehicle-invading-lane-on-bend) + - [Description](#description-13) + - [Basic flow](#basic-flow-11) + - [Pre-condition(Event)](#pre-conditionevent-13) + - [Driving functions](#driving-functions-13) + - [Outcome](#outcome-13) + - [Associated use cases](#associated-use-cases-13) + - [15. Longitudinal control after leading vehicle brakes](#15-longitudinal-control-after-leading-vehicle-brakes) + - [Description](#description-14) + - [Basic flow](#basic-flow-12) + - [Pre-condition(Event)](#pre-conditionevent-14) + - [Driving functions](#driving-functions-14) + - [Outcome](#outcome-14) + - [Associated use cases](#associated-use-cases-14) + - [16. 
Obstacle avoidance without prior action](#16-obstacle-avoidance-without-prior-action) + - [Description](#description-15) + - [Basic flow](#basic-flow-13) + - [Pre-condition(Event)](#pre-conditionevent-15) + - [Driving functions](#driving-functions-15) + - [Outcome](#outcome-15) + - [Associated use cases](#associated-use-cases-15) + - [17. Pedestrian emerging from behind parked vehicle](#17-pedestrian-emerging-from-behind-parked-vehicle) + - [Description](#description-16) + - [Basic flow](#basic-flow-14) + - [Pre-condition(Event)](#pre-conditionevent-16) + - [Driving functions](#driving-functions-16) + - [Outcome](#outcome-16) + - [Associated use cases](#associated-use-cases-16) + - [18. Obstacle avoidance with prior action](#18-obstacle-avoidance-with-prior-action) + - [Description](#description-17) + - [Basic flow](#basic-flow-15) + - [Pre-condition(Event)](#pre-conditionevent-17) + - [Driving functions](#driving-functions-17) + - [Outcome](#outcome-17) + - [Associated use cases](#associated-use-cases-17) + - [19. Parking Cut-in](#19-parking-cut-in) + - [Description](#description-18) + - [Basic flow](#basic-flow-16) + - [Pre-condition(Event)](#pre-conditionevent-18) + - [Driving functions](#driving-functions-18) + - [Outcome](#outcome-18) + - [Associated use cases](#associated-use-cases-18) + - [20. Lane changing to evade slow leading vehicle](#20-lane-changing-to-evade-slow-leading-vehicle) + - [Description](#description-19) + - [Basic flow](#basic-flow-17) + - [Pre-condition(Event)](#pre-conditionevent-19) + - [Driving functions](#driving-functions-19) + - [Outcome](#outcome-19) + - [Associated use cases](#associated-use-cases-19) + - [21. 
Passing obstacle with oncoming traffic](#21-passing-obstacle-with-oncoming-traffic) + - [Description](#description-20) + - [Basic flow](#basic-flow-18) + - [Pre-condition(Event)](#pre-conditionevent-20) + - [Driving functions](#driving-functions-20) + - [Outcome](#outcome-20) + - [Associated use cases](#associated-use-cases-20) + - [22. Parking Exit](#22-parking-exit) + - [Description](#description-21) + - [Basic flow](#basic-flow-19) + - [Pre-condition(Event)](#pre-conditionevent-21) + - [Driving functions](#driving-functions-21) + - [Outcome](#outcome-21) + - [Associated use cases](#associated-use-cases-21) + - [Sources](#sources) --- @@ -61,9 +191,9 @@ Loss of control ### Driving functions -* Control steering angle, throttle and brake to counter unexpected movements +- Control steering angle, throttle and brake to counter unexpected movements -* (Opt): Sense wheel friction to predict unexpected behaviour +- (Opt): Sense wheel friction to predict unexpected behaviour ### Outcome @@ -98,13 +228,13 @@ Global route wants you to perform a left turn at an intersection ### Driving functions -* Sense street signs and traffic lights -* Observe the intersection -* Sense oncoming traffic -* (Check indicator of oncoming traffic) -* Sense pedestrians in your drive path -* Steer the vehicle in a left turn -* Predict if a turn is possible before oncoming traffic reaches the intersection +- Sense street signs and traffic lights +- Observe the intersection +- Sense oncoming traffic +- (Check indicator of oncoming traffic) +- Sense pedestrians in your drive path +- Steer the vehicle in a left turn +- Predict if a turn is possible before oncoming traffic reaches the intersection ### Outcome @@ -144,13 +274,13 @@ Global route wants you to perform a right turn at an intersection ### Driving functions -* Sense street signs and traffic lights -* Observe the intersection -* Sense crossing traffic -* Check indicator of crossing traffic -* Sense pedestrians in your drive path -* Steer 
the vehicle in a right turn -* Predict if a turn is possible before crossing traffic reaches the intersection +- Sense street signs and traffic lights +- Observe the intersection +- Sense crossing traffic +- Check indicator of crossing traffic +- Sense pedestrians in your drive path +- Steer the vehicle in a right turn +- Predict if a turn is possible before crossing traffic reaches the intersection ### Outcome @@ -192,10 +322,10 @@ No traffic lights or street signs are sensed and agent is at an intersection ### Driving functions -* Sense street signs and traffic lights -* Observe the intersection -* Sense pedestrians in your drive path -* Steering the vehicle +- Sense street signs and traffic lights +- Observe the intersection +- Sense pedestrians in your drive path +- Steering the vehicle ### Outcome @@ -225,10 +355,10 @@ Vehicle enters intersection while having a red light ### Driving functions -* Sense street signs and traffic lights -* Observe the intersection -* Sense crossing traffic -* Emergency brake +- Sense street signs and traffic lights +- Observe the intersection +- Sense crossing traffic +- Emergency brake ### Outcome @@ -269,10 +399,10 @@ Vehicle enters a highway ### Driving functions -* Sense speed of surrounding traffic -* Sense length of ramp -* Adjust speed to enter highway -* Turn into highway +- Sense speed of surrounding traffic +- Sense length of ramp +- Adjust speed to enter highway +- Turn into highway ### Outcome @@ -310,11 +440,11 @@ Vehicle enters a highway ### Driving functions -* Sense speed of surrounding traffic -* Adjust speed to let vehicle enter highway -* Change lane -* Decelerate -* Brake +- Sense speed of surrounding traffic +- Adjust speed to let vehicle enter highway +- Change lane +- Decelerate +- Brake ### Outcome @@ -352,11 +482,11 @@ Vehicle tries to cut-in ### Driving functions -* Sense speed of surrounding traffic -* Adjust speed to let vehicle enter lane -* Change lane -* Decelerate -* Brake +- Sense speed of 
surrounding traffic +- Adjust speed to let vehicle enter lane +- Change lane +- Decelerate +- Brake ### Outcome @@ -397,12 +527,12 @@ Vehicle leaves a highway ### Driving functions -* Sense speed of surrounding traffic -* Sense distance to off-ramp -* Adjust speed to change lane -* Change lane -* Decelerate -* Brake +- Sense speed of surrounding traffic +- Sense distance to off-ramp +- Adjust speed to change lane +- Change lane +- Decelerate +- Brake ### Outcome @@ -441,10 +571,10 @@ Emergency vehicle behind us ### Driving functions -* Sense emergency vehicle -* Sense speed of surrounding traffic -* Adjust speed to change lane -* Change lane +- Sense emergency vehicle +- Sense speed of surrounding traffic +- Adjust speed to change lane +- Change lane ### Outcome @@ -481,11 +611,11 @@ Obstacle on lane ### Driving functions -* Sense obstacles -* Sense speed of surrounding traffic -* Change lane -* Decelerate -* Brake +- Sense obstacles +- Sense speed of surrounding traffic +- Change lane +- Decelerate +- Brake ### Outcome @@ -536,11 +666,11 @@ Door opens in lane ### Driving functions -* Sense opening door -* Sense speed of surrounding traffic -* Change lane -* Decelerate -* Brake +- Sense opening door +- Sense speed of surrounding traffic +- Change lane +- Decelerate +- Brake ### Outcome @@ -585,11 +715,11 @@ slow moving hazard(bicycle) in lane ### Driving functions -* Sense slow moving hazards -* Sense speed of surrounding traffic -* Change lane -* Decelerate -* Brake +- Sense slow moving hazards +- Sense speed of surrounding traffic +- Change lane +- Decelerate +- Brake ### Outcome @@ -632,10 +762,10 @@ Bend in the road and a vehicle invading our lane ### Driving functions -* Sense vehicle on our lane -* Decelerate -* Brake -* Move to right part of lane +- Sense vehicle on our lane +- Decelerate +- Brake +- Move to right part of lane ### Outcome @@ -667,10 +797,10 @@ Vehicle in front suddenly slows down ### Driving functions -* Sense vehicle on our lane -* Sense 
vehicle speed -* Decelerate -* Emergency-/Brake +- Sense vehicle on our lane +- Sense vehicle speed +- Decelerate +- Emergency-/Brake ### Outcome @@ -709,9 +839,9 @@ Obstacle in front suddenly appears ### Driving functions -* Sense obstacle on our lane -* Decelerate -* Emergency-/Brake +- Sense obstacle on our lane +- Decelerate +- Emergency-/Brake ### Outcome @@ -760,9 +890,9 @@ Pedestrian in front suddenly appears from behind a parked car. ### Driving functions -* Sense pedestrian on our lane -* Decelerate -* Emergency-/Brake +- Sense pedestrian on our lane +- Decelerate +- Emergency-/Brake ### Outcome @@ -803,9 +933,9 @@ Obstacle in planned driving path ### Driving functions -* Sense obstacle in driving path -* Decelerate -* Emergency-/Brake +- Sense obstacle in driving path +- Decelerate +- Emergency-/Brake ### Outcome @@ -842,9 +972,9 @@ Parked car tries to join traffic ### Driving functions -* Sense parked car starts moving -* Decelerate -* Emergency-/Brake +- Sense parked car starts moving +- Decelerate +- Emergency-/Brake ### Outcome @@ -883,11 +1013,11 @@ Speed of car is under a certain threshold ### Driving functions -* Sense speed of traffic -* Sense vehicles in surrounding lanes -* Decelerate -* Emergency-/Brake -* Change lane +- Sense speed of traffic +- Sense vehicles in surrounding lanes +- Decelerate +- Emergency-/Brake +- Change lane ### Outcome @@ -929,14 +1059,14 @@ Obstacle in front of us with oncoming traffic ### Driving functions -* Sense obstacle -* Sense length of obstacle -* Sense speed, distance of oncoming traffic -* Sense vehicles in surrounding lanes -* Decelerate -* Brake -* Change lane -* Rejoin old lane after the obstacle +- Sense obstacle +- Sense length of obstacle +- Sense speed, distance of oncoming traffic +- Sense vehicles in surrounding lanes +- Decelerate +- Brake +- Change lane +- Rejoin old lane after the obstacle ### Outcome @@ -981,11 +1111,11 @@ Ego-vehicle is parked and wants to join traffic ### Driving functions -* 
Sense space of parking bay -* Sense speed, distance of traffic -* Sense vehicles in lane the agent wants to join -* Accelerate -* Change lane(Join traffic) +- Sense space of parking bay +- Sense speed, distance of traffic +- Sense vehicles in lane the agent wants to join +- Accelerate +- Change lane(Join traffic) ### Outcome diff --git a/doc/03_research/04_requirements/Readme.md b/doc/03_research/04_requirements/Readme.md index e45c90be..a2f40164 100644 --- a/doc/03_research/04_requirements/Readme.md +++ b/doc/03_research/04_requirements/Readme.md @@ -2,6 +2,6 @@ This folder contains all the results of our research on requirements: -* [Leaderboard information](./02_informations_from_leaderboard.md) -* [Reqirements for agent](./03_requirements.md) -* [Use case scenarios](./04_use_cases.md) +- [Leaderboard information](./02_informations_from_leaderboard.md) +- [Reqirements for agent](./03_requirements.md) +- [Use case scenarios](./04_use_cases.md) diff --git a/doc/03_research/Readme.md b/doc/03_research/Readme.md index f4948302..04591a65 100644 --- a/doc/03_research/Readme.md +++ b/doc/03_research/Readme.md @@ -4,7 +4,7 @@ This folder contains every research we did before we started the project. 
The research is structured in the following folders: -* [Acting](./01_acting/Readme.md) -* [Perception](./02_perception/Readme.md) -* [Planning](./03_planning/Readme.md) -* [Requirements](./04_requirements/Readme.md) +- [Acting](./01_acting/Readme.md) +- [Perception](./02_perception/Readme.md) +- [Planning](./03_planning/Readme.md) +- [Requirements](./04_requirements/Readme.md) diff --git a/doc/06_perception/02_dataset_structure.md b/doc/06_perception/02_dataset_structure.md index 24a4d8e0..aecd0a40 100644 --- a/doc/06_perception/02_dataset_structure.md +++ b/doc/06_perception/02_dataset_structure.md @@ -13,15 +13,15 @@ Marco Riedenauer 19.02.2023 -* [Dataset structure](#dataset-structure) - * [Author](#author) - * [Date](#date) - * [Converting the dataset](#converting-the-dataset) - * [Preparation of the dataset for training](#preparation-of-the-dataset-for-training) - * [Explanation of the conversion of groundtruth images](#explanation-of-the-conversion-of-groundtruth-images) - * [Things](#things) - * [Stuff](#stuff) - * [Explanation of creating json files](#explanation-of-creating-json-files) +- [Dataset structure](#dataset-structure) + - [Author](#author) + - [Date](#date) + - [Converting the dataset](#converting-the-dataset) + - [Preparation of the dataset for training](#preparation-of-the-dataset-for-training) + - [Explanation of the conversion of groundtruth images](#explanation-of-the-conversion-of-groundtruth-images) + - [Things](#things) + - [Stuff](#stuff) + - [Explanation of creating json files](#explanation-of-creating-json-files) ## Converting the dataset @@ -64,7 +64,7 @@ following structure: When the dataset has the correct structure, the groundtruth images have to be converted to COCO format and some json files have to be created. 
-To do so, execute the following command in your b5 shell: +To do so, execute the following command in an attached shell: ```shell python3 perception/src/panoptic_segmentation/preparation/createPanopticImgs.py --dataset_folder diff --git a/doc/06_perception/03_lidar_distance_utility.md b/doc/06_perception/03_lidar_distance_utility.md index 2d68e6f1..f81d2904 100644 --- a/doc/06_perception/03_lidar_distance_utility.md +++ b/doc/06_perception/03_lidar_distance_utility.md @@ -24,11 +24,11 @@ Tim Dreier --- -* [Lidar Distance Utility](#lidar-distance-utility) - * [Author](#author) - * [Date](#date) - * [Configuration](#configuration) - * [Example](#example) +- [Lidar Distance Utility](#lidar-distance-utility) + - [Author](#author) + - [Date](#date) + - [Configuration](#configuration) + - [Example](#example) ## Configuration diff --git a/doc/06_perception/04_efficientps.md b/doc/06_perception/04_efficientps.md index 4fa17b74..92ce43a4 100644 --- a/doc/06_perception/04_efficientps.md +++ b/doc/06_perception/04_efficientps.md @@ -15,14 +15,14 @@ Marco Riedenauer 28.03.2023 -* [EfficientPS](#efficientps) - * [Author](#author) - * [Date](#date) - * [Model Overview](#model-overview) - * [Training](#training) - * [Labels](#labels) - * [Training parameters](#training-parameters) - * [Train](#train) +- [EfficientPS](#efficientps) + - [Author](#author) + - [Date](#date) + - [Model Overview](#model-overview) + - [Training](#training) + - [Labels](#labels) + - [Training parameters](#training-parameters) + - [Train](#train) ## Model Overview @@ -35,13 +35,13 @@ case, since we used half the image size. ![EfficientPS Structure](../00_assets/efficientps_structure.png) [Source](https://arxiv.org/pdf/2004.02307.pdf) -* Feature Extraction: +- Feature Extraction: This is the first part of the model on which all following parts depend on. In this part, all important features are extracted from the input image. 
-* Semantic Segmentation Head: As the name implies, this part of the model computes a semantic segmentation on the +- Semantic Segmentation Head: As the name implies, this part of the model computes a semantic segmentation on the extracted features. -* Instance Segmentation Head: This part computes the instance segmentation on things on the extracted features. -* Panoptic Fusion: As the last part of the model, this component is responsible for combining the information gathered +- Instance Segmentation Head: This part computes the instance segmentation on things on the extracted features. +- Panoptic Fusion: As the last part of the model, this component is responsible for combining the information gathered by the semantic segmentation and the instance segmentation heads. The output of this component and thereby the model is an image where stuff is semantic segmented and things are instance segmented. @@ -64,19 +64,19 @@ All adaptable training parameters can be found and changed in The most important configs are: -* MODEL/ROI_HEADS/NUM_CLASSES: Number of instance classes -* DATASET_PATH: Path to dataset root -* TRAIN_JSON: Relative path from DATASET_PATH to train json file -* VALID_JSON: Relative path from DATASET_PATH to validation json file -* PRED_DIR: Directory to save predictions in -* PRED_JSON: Name of prediction json file -* CHECKPOINT_PATH: Path of already trained models you want to train furthermore -* BATCH_SIZE: Number of images to be loaded during on training step -* NUM_CLASSES: Number of all classes +- MODEL/ROI_HEADS/NUM_CLASSES: Number of instance classes +- DATASET_PATH: Path to dataset root +- TRAIN_JSON: Relative path from DATASET_PATH to train json file +- VALID_JSON: Relative path from DATASET_PATH to validation json file +- PRED_DIR: Directory to save predictions in +- PRED_JSON: Name of prediction json file +- CHECKPOINT_PATH: Path of already trained models you want to train furthermore +- BATCH_SIZE: Number of images to be loaded during on 
training step +- NUM_CLASSES: Number of all classes ### Train -To start the training, just execute the following command in b5 shell: +To start the training, just execute the following command in an attached shell: ```shell python3 perception/src/panoptic_segmentation/train_net.py