diff --git a/.editorconfig b/.editorconfig index 9dd330ea..8db9e725 100644 --- a/.editorconfig +++ b/.editorconfig @@ -10,7 +10,7 @@ insert_final_newline = true trim_trailing_whitespace = true [*.py] -max_line_length = 80 +max_line_length = 88 indent_size = 4 [*.md] diff --git a/.flake8 b/.flake8 index 6cce5018..6c032f36 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,3 @@ [flake8] -exclude= code/planning/src/behavior_agent/behavior_tree.py, - code/planning/src/behavior_agent/behaviours/__init__.py, - code/planning/src/behavior_agent/behaviours, - code/planning/__init__.py, - doc/02_development/templates/template_class_no_comments.py, - doc/02_development/templates/template_class.py \ No newline at end of file +max-line-length = 88 +extend-ignore = E203,E701 \ No newline at end of file diff --git a/.github/workflows/add-to-project.yml b/.github/workflows/add-to-project.yml index dcced1c4..52f9bcf6 100644 --- a/.github/workflows/add-to-project.yml +++ b/.github/workflows/add-to-project.yml @@ -1,4 +1,4 @@ -name: Add bugs to bugs project +name: Add issue to project on: issues: diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f7e00e1b..ac3b7276 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,9 +1,10 @@ -name: Build, publish and run tests +name: Build and push image on: - push: - branches: [ 'main' ] - pull_request: + workflow_run: + workflows: ["Check code format", "Linter markdown and code"] + types: + - completed env: REGISTRY: ghcr.io @@ -38,19 +39,6 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Bump version and push tag - # only run on push to main - if: github.event_name == 'push' && github.ref == 'refs/heads/main' - id: tag - uses: mathieudutour/github-tag-action@v6.1 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - release_branches: main - - - name: Get commit hash - id: hash - run: echo "hash=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT - - name: Build and push Docker image id: build uses: docker/build-push-action@v3 @@ -59,72 +47,6 @@ jobs: file: ./build/docker/build/Dockerfile push: true # tag 'latest' and version on push to main, otherwise use the commit hash - tags: | - ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tag.outputs.new_version == '' && steps.hash.outputs.hash || steps.tag.outputs.new_version }} - ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.event_name == 'push' && github.ref == 'refs/heads/main' && 'latest' || steps.hash.outputs.hash }} + tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest cache-from: type=gha cache-to: type=gha,mode=max - - - name: Output version - id: version - # take either the created tag or the commit hash - run: echo "version=${{ steps.tag.outputs.new_version == '' && steps.hash.outputs.hash || steps.tag.outputs.new_version }}" >> $GITHUB_OUTPUT - drive: - runs-on: self-hosted - needs: build-and-push-image - # run only on pull request for now - if: github.event_name == 'pull_request' - env: - AGENT_VERSION: ${{ needs.build-and-push-image.outputs.version }} - COMPOSE_FILE: ./build/docker-compose_cicd.yaml - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - name: Print environment variables (DEBUG) - run: | - echo "AGENT_VERSION=${AGENT_VERSION}" - echo "COMPOSE_FILE=${COMPOSE_FILE}" - - name: Get commit hash - id: hash - run: echo "hash=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT - - name: Set AGENT_VERSION from hash (workaround) - run: echo "AGENT_VERSION=${{ steps.hash.outputs.hash }}" >> 
$GITHUB_ENV - - name: Run docker-compose - run: docker compose up --quiet-pull --exit-code-from agent - - name: Copy results - run: docker compose cp agent:/tmp/simulation_results.json . - - name: Stop docker-compose - # always run this step, to clean up even on error - if: always() - run: docker compose down -v - # add rendered JSON as comment to the pull request - - name: Add simulation results as comment - if: github.event_name == 'pull_request' - uses: actions/github-script@v6 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - # this script reads the simulation_results.json and creates a comment on the pull request with the results. - script: | - const fs = require('fs'); - // read the simulation results - const results = fs.readFileSync('./simulation_results.json', 'utf8'); - let resultsJson = JSON.parse(results); - // create a markdown table of the results - let resultsTable = resultsJson.values.map((values, i) => { - return `| ${resultsJson.labels[i]} | ${values} |`; - }); - // create a markdown table header - let resultsTableHeader = `| Metric | Value |`; - // create a markdown table divider - let resultsTableDivider = `| --- | --- |`; - // add everything to the resultsTable - resultsTable = resultsTableHeader + '\n' + resultsTableDivider + '\n' + resultsTable.join('\n'); - // add the results as a comment to the pull request - github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: "## Simulation results\n" + resultsTable - }); - - name: Prune all images older than 30 days from self-hosted runner - run: docker image prune -a --force --filter "until=720h" diff --git a/.github/workflows/drive.yaml b/.github/workflows/drive.yaml new file mode 100644 index 00000000..629acba7 --- /dev/null +++ b/.github/workflows/drive.yaml @@ -0,0 +1,63 @@ +name: Evaluate agent + +on: + workflow_run: + workflows: ["Build and push image"] + types: + - completed + +jobs: + drive: + runs-on: self-hosted + needs: build-and-push-image + # run only on pull request for now + if: github.event_name == 'pull_request' + env: + AGENT_VERSION: latest + COMPOSE_FILE: ./build/docker-compose.cicd.yaml + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Print environment variables (DEBUG) + run: | + echo "AGENT_VERSION=${AGENT_VERSION}" + echo "COMPOSE_FILE=${COMPOSE_FILE}" + - name: Run docker-compose + run: docker compose up --quiet-pull --exit-code-from agent + - name: Copy results + run: docker compose cp agent:/tmp/simulation_results.json . + - name: Stop docker-compose + # always run this step, to clean up even on error + if: always() + run: docker compose down -v + # add rendered JSON as comment to the pull request + - name: Add simulation results as comment + if: github.event_name == 'pull_request' + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + # this script reads the simulation_results.json and creates a comment on the pull request with the results. 
+ script: | + const fs = require('fs'); + // read the simulation results + const results = fs.readFileSync('./simulation_results.json', 'utf8'); + let resultsJson = JSON.parse(results); + // create a markdown table of the results + let resultsTable = resultsJson.values.map((values, i) => { + return `| ${resultsJson.labels[i]} | ${values} |`; + }); + // create a markdown table header + let resultsTableHeader = `| Metric | Value |`; + // create a markdown table divider + let resultsTableDivider = `| --- | --- |`; + // add everything to the resultsTable + resultsTable = resultsTableHeader + '\n' + resultsTableDivider + '\n' + resultsTable.join('\n'); + // add the results as a comment to the pull request + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: "## Simulation results\n" + resultsTable + }); + - name: Prune all images older than 30 days from self-hosted runner + run: docker image prune -a --force --filter "until=720h" \ No newline at end of file diff --git a/.github/workflows/format.yaml b/.github/workflows/format.yaml new file mode 100644 index 00000000..58a1947e --- /dev/null +++ b/.github/workflows/format.yaml @@ -0,0 +1,21 @@ +name: Check code format + +on: + pull_request: + branches: + - "main" + +jobs: + format: + name: Check code files format + runs-on: ubuntu-latest + steps: + - name: Check out the repo + uses: actions/checkout@v2 + # Execute the python formatter + - name: Run the python formatter + uses: addnab/docker-run-action@v3 + with: + image: pyfound/black + options: -v ${{ github.workspace}}:/apps + run: black --check ./apps/ diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index d20a1b54..630ad725 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -1,6 +1,9 @@ -name: linter +name: Linter markdown and code -on: pull_request +on: + pull_request: + branches: + - "main" jobs: linter: @@ -13,16 +16,13 @@ jobs: - name: Run the markdown linter uses: addnab/docker-run-action@v3 with: - image: peterdavehello/markdownlint:0.32.2 - options: -v ${{ github.workspace }}:/md - run: | - markdownlint . + image: peterdavehello/markdownlint:0.32.2 + options: -v ${{ github.workspace }}:/md + run: markdownlint . # Execute the python linter (executes even if the previous step failed) - name: Run the python linter - if: always() uses: addnab/docker-run-action@v3 with: image: alpine/flake8 options: -v ${{ github.workspace }}:/apps - run: | - flake8 code + run: flake8 . 
diff --git a/.markdownlint.yaml b/.markdownlint.yaml index b49b73db..fe64f4e8 100755 --- a/.markdownlint.yaml +++ b/.markdownlint.yaml @@ -6,7 +6,7 @@ MD013: tables: false MD004: - style: "consistent" + style: dash MD051: false diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 394eaac6..c27bbad5 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -8,6 +8,10 @@ "njpwerner.autodocstring", "ms-azuretools.vscode-docker", "ms-python.flake8", - "bierner.markdown-mermaid" + "bierner.markdown-mermaid", + "richardkotze.git-mob", + "ms-vscode-remote.remote-containers", + "valentjn.vscode-ltex", + "ms-python.black-formatter" ] } \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index b7873462..260ba70c 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,7 +1,6 @@ { "githubIssues.issueBranchTitle": "${issueNumber}-${sanitizedIssueTitle}", "githubIssues.queries": [ - { "label": "My Issues", "query": "default" @@ -19,5 +18,13 @@ "query": "state:open repo:${owner}/${repository} sort:created-desc" } ], - "ltex.language": "en-US" + "ltex.language": "en-US", + "docker.commands.composeUp": [ + { + "label": "Compose Up", + "template": "xhost +local: && USERNAME=$(whoami) USER_UID=$(id -u) USER_GID=$(id -g) ${composeCommand} ${configurationFile} up" + } + ], + "workbench.iconTheme": "vscode-icons", + "editor.formatOnSave": true } \ No newline at end of file diff --git a/README.md b/README.md index 4998d4ac..59863059 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -# Praktikum Autonomes Fahren 2023 - PAF23 +# Praktikum Autonomes Fahren - PAF -This repository contains the source code for the "Praktikum Autonomes Fahren" at the Chair of Mechatronics from the University of Augsburg in the winter semester of 2023/2024. +This repository contains the source code for the "Praktikum Autonomes Fahren" at the Chair of Mechatronics from the University of Augsburg. The goal of the project is to develop a self-driving car that can navigate through a simulated environment. The project is based on the [CARLA simulator](https://carla.org/) and uses the [ROS](https://www.ros.org/) framework for communication between the different components. In the future, the project aims to contribute to the [CARLA Autonomous Driving Challenge](https://leaderboard.carla.org/challenge/). @@ -21,32 +21,22 @@ To be able to execute and develop the project, you need a Linux system equipped As the project is still in early development, these requirements are subject to change. -## Installation +## Getting started -To run the project you have to install [b5](https://github.com/team23/b5) -and [docker](https://docs.docker.com/engine/install/) with NVIDIA GPU support, +### Installation + +To run the project you have to install [docker](https://docs.docker.com/engine/install/) with NVIDIA GPU support, [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). -`b5` is used to simplify some of the docker commands and to provide a more user-friendly interface. `docker` and `nvidia-docker` are used to run the project in a containerized environment with GPU support. -Afterwards, you can set up and execute the project with the following two commands: - -```bash -# Setup project -b5 install - -# Run project -b5 run -``` - -More detailed instructions about setup and execution can be found [here](./doc/01_general/Readme.md). - -More available b5 commands are documented [here](./doc/01_general/03_commands.md). 
+More detailed instructions about the setup can be found [here](./doc/general/installation.md). ## Development -If you contribute to this project please read the guidelines first. They can be found [here](./doc/02_development/Readme.md). +To get an overview of the current architecture of the agent you can look at the general documentation [here](./doc/general/architecture.md). The individual components are explained in the README files of their subfolders. + +If you contribute to this project please read the guidelines first. They can be found [here](./doc/development/README.md). ## Research -The research on existing projects we did can be found [here](./doc/03_research/Readme.md). +The research on existing projects we did can be found [here](./doc/research/README.md). diff --git a/build/Taskfile b/build/Taskfile deleted file mode 100644 index c89e1133..00000000 --- a/build/Taskfile +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env bash -# b5 Taskfile, see https://git.team23.de/build/b5 for details - -########################################## -# General commands -########################################## - -task:shell() { - container="$1" - command="$2" - additionalArguments="${@:3}" - docker:container_run "${container:-agent}" "${command:-/bin/bash}" ${additionalArguments:-} -} - -########################################## -# Project setup / maintenance -########################################## -task:install() { - task:install:git_hooks - #task:gitconfig:copy - install:gpu-support - docker:install -} - -install:gpu-support() { - # check if docker-nvidia is installed, to make the project also executable on - # systems without nvidia GPU. - - if [ -z "$(command -v docker-nvidia)" ] - then - echo -e "Juhu! Alles ist richtig installiert für NVIDIA-Support! Hier ein Keks für dich :D" - else - RED='\033[0;31m' - NC='\033[0m' - echo -e "${RED}######################################################################################${NC}" - echo -e "${RED}WARNING: NVIDIA Container Toolkit not installed. The project won't run as expected!${NC}" - echo -e "${RED}#####################################################################################${NC}" - fi -} - -########################################## -# Project linting -########################################## - -task:lint() { - b5 python:lint - b5 markdown:lint -} - -task:python:lint() { - docker:container_run -T flake8 code -} - -task:markdown:lint() { - docker:container_run -T mdlint markdownlint . -} - -task:markdown:fix() { - docker:container_run -T mdlint markdownlint --fix . 
-} - -task:comlipy() { - docker:container_run -T comlipy -c /apps/build/config-comlipy.yml "$@" -} - -task:install:git_hooks() { - test -L ../.git/hooks/pre-commit || ln -s ../../build/hooks/pre-commit ../.git/hooks/ - test -L ../.git/hooks/commit-msg || ln -s ../../build/hooks/commit-msg ../.git/hooks/ - chmod +x ./hooks/* -} - -task:gitconfig:copy() { - cp -u ~/.gitconfig ../.gitconfig -} - -source ./tasks/ros.sh diff --git a/build/agent_service.yaml b/build/agent_service.yaml index 6362983b..13d8bab1 100644 --- a/build/agent_service.yaml +++ b/build/agent_service.yaml @@ -2,10 +2,11 @@ services: agent: build: dockerfile: build/docker/agent/Dockerfile - args: - - USER_UID=${DOCKER_HOST_UNIX_UID:-1000} - - USER_GID=${DOCKER_HOST_UNIX_GID:-1000} context: ../ + args: + USERNAME: ${USERNAME} + USER_UID: ${USER_UID} + USER_GID: ${USER_GID} init: true tty: true shm_size: 2gb diff --git a/build/config-comlipy.yml b/build/config-comlipy.yml deleted file mode 100644 index 9fffe84d..00000000 --- a/build/config-comlipy.yml +++ /dev/null @@ -1,23 +0,0 @@ -# comlipy config file (commit naming) -global: - help: 'Help: https://github.com/ll7/paf22/blob/main/doc/developement/commit.md' - -rules: - scope-min-length: - applicable: 'always' - value: 2 - level: 1 - type-enum: - applicable: 'always' - value: - - 'docs' - - 'feat' - - 'fix' - - 'other' - level: 1 - subject-case: - applicable: 'never' - value: - - 'upper-case' - level: 1 - diff --git a/build/config.yml b/build/config.yml deleted file mode 100644 index 648c6c08..00000000 --- a/build/config.yml +++ /dev/null @@ -1,4 +0,0 @@ -# b5 config file -modules: - template: - docker: diff --git a/build/carla-simulator_service.yaml b/build/docker-compose.carla-simulator.yaml similarity index 100% rename from build/carla-simulator_service.yaml rename to build/docker-compose.carla-simulator.yaml diff --git a/build/docker-compose_cicd.yaml b/build/docker-compose.cicd.yaml similarity index 87% rename from build/docker-compose_cicd.yaml rename to build/docker-compose.cicd.yaml index c792c6d0..2d7a47d5 100644 --- a/build/docker-compose_cicd.yaml +++ b/build/docker-compose.cicd.yaml @@ -2,11 +2,11 @@ include: # linter runs in a seperate workflow - roscore_service.yaml - - carla-simulator_service.yaml + - docker-compose.carla-simulator.yaml services: agent: - image: ghcr.io/una-auxme/paf23:${AGENT_VERSION:-latest} + image: ghcr.io/una-auxme/paf:${AGENT_VERSION:-latest} init: true tty: true logging: diff --git a/build/docker-compose_dev_offline.yaml b/build/docker-compose.dev.yaml similarity index 70% rename from build/docker-compose_dev_offline.yaml rename to build/docker-compose.dev.yaml index 0e3fe39a..ab21bf03 100644 --- a/build/docker-compose_dev_offline.yaml +++ b/build/docker-compose.dev.yaml @@ -1,13 +1,17 @@ # compose file for the development without a driving vehicle # "interactive" development without a car +include: + - roscore_service.yaml + services: agent-dev: build: dockerfile: build/docker/agent-dev/Dockerfile context: ../ args: - - USER_UID=${DOCKER_HOST_UNIX_UID:-1000} - - USER_GID=${DOCKER_HOST_UNIX_GID:-1000} + USERNAME: ${USERNAME} + USER_UID: ${USER_UID} + USER_GID: ${USER_GID} init: true tty: true shm_size: 2gb @@ -23,6 +27,4 @@ services: - DISPLAY=${DISPLAY} network_mode: host privileged: true - entrypoint: ["/dev_entrypoint.sh"] - command: bash - \ No newline at end of file + command: bash -c "sudo chown -R ${USER_UID}:${USER_GID} ../ && sudo chmod -R a+w ../ && bash" diff --git a/build/docker-compose_dev_distributed.yaml 
b/build/docker-compose.devroute-distributed.yaml similarity index 54% rename from build/docker-compose_dev_distributed.yaml rename to build/docker-compose.devroute-distributed.yaml index fe805a93..cc9f429c 100644 --- a/build/docker-compose_dev_distributed.yaml +++ b/build/docker-compose.devroute-distributed.yaml @@ -1,6 +1,6 @@ # compose file for the development environment with distributed mode include: - - linter_services.yaml + - docker-compose.linter.yaml - roscore_service.yaml services: @@ -8,9 +8,10 @@ services: extends: file: agent_service.yaml service: agent - command: bash -c "sleep 10 && roslaunch agent/launch/dev.launch" + command: bash -c "sleep 10 && sudo chown -R ${USER_UID}:${USER_GID} ../ && sudo chmod -R a+w ../ && roslaunch agent/launch/dev.launch" environment: - CARLA_SIM_HOST= + - ROUTE=/workspace/code/routes/routes_simple.xml networks: carla: \ No newline at end of file diff --git a/build/docker-compose.devroute.yaml b/build/docker-compose.devroute.yaml new file mode 100644 index 00000000..4510f0a2 --- /dev/null +++ b/build/docker-compose.devroute.yaml @@ -0,0 +1,15 @@ +# compose file for the development environment +# routes_simple.xml +include: + - docker-compose.linter.yaml + - roscore_service.yaml + - docker-compose.carla-simulator.yaml + +services: + agent: + extends: + file: agent_service.yaml + service: agent + environment: + - ROUTE=/workspace/code/routes/routes_simple.xml + command: bash -c "sleep 10 && sudo chown -R ${USER_UID}:${USER_GID} ../ && sudo chmod -R a+w ../ && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=$${ROUTE} --agent=/workspace/code/agent/src/agent/agent.py --host=$${CARLA_SIM_HOST} --track=MAP" diff --git a/build/docker-compose.leaderboard-distributed.yaml b/build/docker-compose.leaderboard-distributed.yaml new file mode 100644 index 00000000..1bcb9949 --- /dev/null +++ b/build/docker-compose.leaderboard-distributed.yaml @@ -0,0 +1,15 @@ +include: + - docker-compose.linter.yaml + - roscore_service.yaml + +services: + agent: + extends: + file: agent_service.yaml + service: agent + command: bash -c "sleep 10 && sudo chown -R ${USER_UID}:${USER_GID} ../ && sudo chmod -R a+w ../ && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=$${ROUTE} --agent=/workspace/code/agent/src/agent/agent.py --host=$${CARLA_SIM_HOST} --track=MAP" + environment: + - CARLA_SIM_HOST= + +networks: + carla: \ No newline at end of file diff --git a/build/docker-compose.leaderboard.yaml b/build/docker-compose.leaderboard.yaml new file mode 100644 index 00000000..32fc98fc --- /dev/null +++ b/build/docker-compose.leaderboard.yaml @@ -0,0 +1,11 @@ +include: + - docker-compose.linter.yaml + - roscore_service.yaml + - docker-compose.carla-simulator.yaml + +services: + agent: + extends: + file: agent_service.yaml + service: agent + command: bash -c "sleep 10 && sudo chown -R ${USER_UID}:${USER_GID} ../ && sudo chmod -R a+w ../ && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=$${ROUTE} --agent=/workspace/code/agent/src/agent/agent.py --host=$${CARLA_SIM_HOST} --track=MAP" diff --git a/build/linter_services.yaml b/build/docker-compose.linter.yaml similarity index 77% rename from build/linter_services.yaml rename to build/docker-compose.linter.yaml index 3e386f88..d184141d 100644 --- a/build/linter_services.yaml +++ b/build/docker-compose.linter.yaml @@ -5,9 +5,9 @@ services: volumes: - ../:/apps - comlipy: - build: docker/comlipy - command: . 
+ black: + image: pyfound/black + command: black --check ./apps/ volumes: - ../:/apps diff --git a/build/docker-compose.yaml b/build/docker-compose.yaml deleted file mode 100644 index 44acaf62..00000000 --- a/build/docker-compose.yaml +++ /dev/null @@ -1,11 +0,0 @@ -include: - - linter_services.yaml - - roscore_service.yaml - - carla-simulator_service.yaml - -services: - agent: - extends: - file: agent_service.yaml - service: agent - command: bash -c "sleep 10 && sudo chown -R carla:carla ../code/ && sudo chmod -R a+w ../code/ && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=$${ROUTE} --agent=/workspace/code/agent/src/agent/agent.py --host=$${CARLA_SIM_HOST} --track=MAP" diff --git a/build/docker-compose_dev.yaml b/build/docker-compose_dev.yaml deleted file mode 100644 index 1b9129de..00000000 --- a/build/docker-compose_dev.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# compose file for the development environment -# routes_simple.xml -include: - - linter_services.yaml - - roscore_service.yaml - - carla-simulator_service.yaml - -services: - agent: - extends: - file: agent_service.yaml - service: agent - environment: - - ROUTE=/workspace/code/routes/routes_simple.xml - command: bash -c "sleep 10 && sudo chown -R carla:carla ../code/ && sudo chmod -R a+w ../code/ && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=$${ROUTE} --agent=/workspace/code/agent/src/agent/agent.py --host=$${CARLA_SIM_HOST} --track=MAP" diff --git a/build/docker-compose_distributed.yaml b/build/docker-compose_distributed.yaml deleted file mode 100644 index cd600a02..00000000 --- a/build/docker-compose_distributed.yaml +++ /dev/null @@ -1,15 +0,0 @@ -include: - - linter_services.yaml - - roscore_service.yaml - -services: - agent: - extends: - file: agent_service.yaml - service: agent - command: bash -c "sleep 10 && sudo chown -R carla:carla ../code/ && sudo chmod -R a+w ../code/ && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=$${ROUTE} --agent=/workspace/code/agent/src/agent/agent.py --host=$${CARLA_SIM_HOST} --track=MAP" - environment: - - CARLA_SIM_HOST= - -networks: - carla: \ No newline at end of file diff --git a/build/docker/agent-dev/dev_entrypoint.sh b/build/docker/agent-dev/dev_entrypoint.sh index 14f912e3..2626fcb9 100755 --- a/build/docker/agent-dev/dev_entrypoint.sh +++ b/build/docker/agent-dev/dev_entrypoint.sh @@ -1,19 +1,7 @@ #!/bin/bash +set -e -# Source ROS setup source /opt/ros/noetic/setup.bash - -# Source the catkin workspace setup source /catkin_ws/devel/setup.bash -# Set up any additional environment variables if needed -export CARLA_ROOT=/opt/carla -export SCENARIO_RUNNER_ROOT=/opt/scenario_runner -export LEADERBOARD_ROOT=/opt/leaderboard - -# Execute the command passed to the script, or start a bash session if no command was given -if [ $# -eq 0 ]; then - exec bash -else - exec "$@" -fi \ No newline at end of file +exec "$@" diff --git a/build/docker/agent/Dockerfile b/build/docker/agent/Dockerfile index ea981056..dff54814 100644 --- a/build/docker/agent/Dockerfile +++ b/build/docker/agent/Dockerfile @@ -12,14 +12,14 @@ FROM osrf/ros:noetic-desktop-full-focal # COPY --from=carla /home/carla/PythonAPI /opt/carla/PythonAPI -ARG USERNAME=carla -ARG USER_UID=999 -ARG USER_GID=$USER_UID +ARG USERNAME +ARG USER_UID +ARG USER_GID ARG DEBIAN_FRONTEND=noninteractive # install rendering dependencies for rviz / rqt RUN apt-get update \ - && apt-get install -y -qq --no-install-recommends \ + && apt-get install -y 
-qq --no-install-recommends \ libxext6 libx11-6 libglvnd0 libgl1 \ libglx0 libegl1 freeglut3-dev apt-utils \ fprintd libfprint-2-2 libpam-fprintd @@ -29,11 +29,11 @@ RUN apt-get install wget unzip # Download Carla PythonAPI (alternative to getting it from the Carla-Image, which is commented out above) # If the PythonAPI/Carla version changes, either update the link, or refer to the comment at the top of this file. -RUN wget https://github.com/una-auxme/paf23/releases/download/v0.0.1/PythonAPI_Leaderboard-2.0.zip -O PythonAPI.zip \ - && unzip PythonAPI.zip \ - && rm PythonAPI.zip \ - && mkdir -p /opt/carla \ - && mv PythonAPI /opt/carla/PythonAPI +RUN wget https://github.com/una-auxme/paf/releases/download/v0.0.1/PythonAPI_Leaderboard-2.0.zip -O PythonAPI.zip \ + && unzip PythonAPI.zip \ + && rm PythonAPI.zip \ + && mkdir -p /opt/carla \ + && mv PythonAPI /opt/carla/PythonAPI # Workaround/fix for using dpkg for cuda installation # Only required for the lab PCs @@ -65,12 +65,12 @@ ENV PYTHONPATH=$PYTHONPATH:/opt/carla/PythonAPI/carla/dist/carla-0.9.14-py3.7-li # install mlocate, pip, wget, git and some ROS dependencies for building the CARLA ROS bridge RUN apt-get update && apt-get install -y \ - mlocate python3-pip wget git python-is-python3 \ - ros-noetic-ackermann-msgs ros-noetic-derived-object-msgs \ - ros-noetic-carla-msgs ros-noetic-pcl-conversions \ - ros-noetic-rviz ros-noetic-rqt ros-noetic-pcl-ros ros-noetic-rosbridge-suite ros-noetic-rosbridge-server \ - ros-noetic-robot-pose-ekf ros-noetic-ros-numpy \ - ros-noetic-py-trees-ros ros-noetic-rqt-py-trees ros-noetic-rqt-reconfigure + mlocate python3-pip wget git python-is-python3 \ + ros-noetic-ackermann-msgs ros-noetic-derived-object-msgs \ + ros-noetic-carla-msgs ros-noetic-pcl-conversions \ + ros-noetic-rviz ros-noetic-rqt ros-noetic-pcl-ros ros-noetic-rosbridge-suite ros-noetic-rosbridge-server \ + ros-noetic-robot-pose-ekf ros-noetic-ros-numpy \ + ros-noetic-py-trees-ros ros-noetic-rqt-py-trees ros-noetic-rqt-reconfigure SHELL ["/bin/bash", "-c"] @@ -105,9 +105,9 @@ ENV CARLA_ROS_BRIDGE_ROOT=/catkin_ws/src/ros-bridge # (as we're not running as root, pip installs into ~/.local/bin) ENV PATH=$PATH:/home/$USERNAME/.local/bin -# install simple_pid +# install pip packages RUN python -m pip install pip --upgrade \ - && python -m pip install simple_pid pygame transformations roslibpy lxml + && python -m pip install simple_pid pygame transformations roslibpy lxml black # install the scenario runner from GitHub leaderboard-2.0 branch ENV CARLA_ROOT=/opt/carla @@ -139,7 +139,7 @@ ENV CARLA_SIM_HOST=localhost ENV CARLA_SIM_WAIT_SECS=15 ENV SCENARIO_RUNNER_PATH=/opt/scenario_runner -# setup python path for PyCharm integration +# setup python path RUN echo /catkin_ws/install/lib/python3/dist-packages >> /home/$USERNAME/.local/lib/python3.8/site-packages/carla.pth && \ echo /catkin_ws/devel/lib/python3/dist-packages >> /home/$USERNAME/.local/lib/python3.8/site-packages/carla.pth && \ echo /opt/ros/noetic/lib/python3/dist-packages >> /home/$USERNAME/.local/lib/python3.8/site-packages/carla.pth && \ @@ -170,20 +170,19 @@ RUN source /opt/ros/noetic/setup.bash && catkin_make ADD ./build/docker/agent/entrypoint.sh /entrypoint.sh - - # set the default working directory to the code WORKDIR /workspace/code RUN echo "source /opt/ros/noetic/setup.bash" >> ~/.bashrc +RUN echo "source /catkin_ws/devel/setup.bash" >> ~/.bashrc ENTRYPOINT ["/entrypoint.sh"] CMD ["bash", "-c", "sleep 10 && \ -python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py \ 
---debug=${DEBUG_CHALLENGE} \ ---repetitions=${REPETITIONS} \ ---checkpoint=${CHECKPOINT_ENDPOINT} \ ---track=${CHALLENGE_TRACK} \ ---agent=${TEAM_AGENT} \ ---routes=${ROUTES} \ ---host=${CARLA_SIM_HOST}"] + python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py \ + --debug=${DEBUG_CHALLENGE} \ + --repetitions=${REPETITIONS} \ + --checkpoint=${CHECKPOINT_ENDPOINT} \ + --track=${CHALLENGE_TRACK} \ + --agent=${TEAM_AGENT} \ + --routes=${ROUTES} \ + --host=${CARLA_SIM_HOST}"] diff --git a/build/docker/agent/Dockerfile_Submission b/build/docker/agent/Dockerfile_Submission index bb6757d2..8128266e 100644 --- a/build/docker/agent/Dockerfile_Submission +++ b/build/docker/agent/Dockerfile_Submission @@ -19,18 +19,18 @@ ARG DEBIAN_FRONTEND=noninteractive # install rendering dependencies for rviz / rqt RUN apt-get update \ - && apt-get install -y -qq --no-install-recommends \ + && apt-get install -y -qq --no-install-recommends \ libxext6 libx11-6 libglvnd0 libgl1 \ libglx0 libegl1 freeglut3-dev # install dependencies for libgit2 and Carla PythonAPI RUN apt-get install wget unzip -RUN wget https://github.com/una-auxme/paf23/releases/download/v0.0.1/PythonAPI_Leaderboard-2.0.zip -O PythonAPI.zip \ - && unzip PythonAPI.zip \ - && rm PythonAPI.zip \ - && mkdir -p /opt/carla \ - && mv PythonAPI /opt/carla/PythonAPI +RUN wget https://github.com/una-auxme/paf/releases/download/v0.0.1/PythonAPI_Leaderboard-2.0.zip -O PythonAPI.zip \ + && unzip PythonAPI.zip \ + && rm PythonAPI.zip \ + && mkdir -p /opt/carla \ + && mv PythonAPI /opt/carla/PythonAPI # build libgit2 RUN wget https://github.com/libgit2/libgit2/archive/refs/tags/v1.5.0.tar.gz -O libgit2-1.5.0.tar.gz \ @@ -67,12 +67,12 @@ ENV PYTHONPATH=$PYTHONPATH:/opt/carla/PythonAPI/carla/dist/carla-0.9.14-py3.7-li # install mlocate, pip, wget, git and some ROS dependencies for building the CARLA ROS bridge RUN apt-get update && apt-get install -y \ - mlocate python3-pip wget git python-is-python3 \ - ros-noetic-ackermann-msgs ros-noetic-derived-object-msgs \ - ros-noetic-carla-msgs ros-noetic-pcl-conversions \ - ros-noetic-rviz ros-noetic-rqt ros-noetic-pcl-ros ros-noetic-rosbridge-suite ros-noetic-rosbridge-server \ - ros-noetic-robot-pose-ekf ros-noetic-ros-numpy \ - ros-noetic-py-trees-ros ros-noetic-rqt-py-trees ros-noetic-rqt-reconfigure + mlocate python3-pip wget git python-is-python3 \ + ros-noetic-ackermann-msgs ros-noetic-derived-object-msgs \ + ros-noetic-carla-msgs ros-noetic-pcl-conversions \ + ros-noetic-rviz ros-noetic-rqt ros-noetic-pcl-ros ros-noetic-rosbridge-suite ros-noetic-rosbridge-server \ + ros-noetic-robot-pose-ekf ros-noetic-ros-numpy \ + ros-noetic-py-trees-ros ros-noetic-rqt-py-trees ros-noetic-rqt-reconfigure SHELL ["/bin/bash", "-c"] @@ -141,7 +141,7 @@ ENV CARLA_SIM_HOST=localhost ENV CARLA_SIM_WAIT_SECS=15 ENV SCENARIO_RUNNER_PATH=/opt/scenario_runner -# setup python path for PyCharm integration +# setup python path RUN echo /catkin_ws/install/lib/python3/dist-packages >> /home/$USERNAME/.local/lib/python3.8/site-packages/carla.pth && \ echo /catkin_ws/devel/lib/python3/dist-packages >> /home/$USERNAME/.local/lib/python3.8/site-packages/carla.pth && \ echo /opt/ros/noetic/lib/python3/dist-packages >> /home/$USERNAME/.local/lib/python3.8/site-packages/carla.pth && \ @@ -178,12 +178,13 @@ ADD ./build/docker/agent/entrypoint.sh /entrypoint.sh WORKDIR /workspace/code RUN echo "source /opt/ros/noetic/setup.bash" >> ~/.bashrc +RUN echo "source /catkin_ws/devel/setup.bash" >> ~/.bashrc ENTRYPOINT ["/entrypoint.sh"] CMD ["bash", 
"-c", "sleep 10 && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=${DEBUG_CHALLENGE} \ ---repetitions=${REPETITIONS} \ ---checkpoint=${CHECKPOINT_ENDPOINT} \ ---track=${CHALLENGE_TRACK} \ ---agent=${TEAM_AGENT} \ ---routes=${ROUTES} \ ---host=${CARLA_SIM_HOST}"] + --repetitions=${REPETITIONS} \ + --checkpoint=${CHECKPOINT_ENDPOINT} \ + --track=${CHALLENGE_TRACK} \ + --agent=${TEAM_AGENT} \ + --routes=${ROUTES} \ + --host=${CARLA_SIM_HOST}"] diff --git a/build/docker/agent/entrypoint.sh b/build/docker/agent/entrypoint.sh index 61e51dc4..2626fcb9 100755 --- a/build/docker/agent/entrypoint.sh +++ b/build/docker/agent/entrypoint.sh @@ -1,7 +1,7 @@ #!/bin/bash set -e -source "/opt/ros/noetic/setup.bash" -source "/catkin_ws/devel/setup.bash" +source /opt/ros/noetic/setup.bash +source /catkin_ws/devel/setup.bash exec "$@" diff --git a/build/docker/comlipy/Dockerfile b/build/docker/comlipy/Dockerfile deleted file mode 100644 index b98681f2..00000000 --- a/build/docker/comlipy/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM python:3-alpine - -RUN pip install --no-cache-dir comlipy - -WORKDIR /apps - -ENTRYPOINT ["comlipy"] -CMD ["--help"] \ No newline at end of file diff --git a/build/hooks/commit-msg b/build/hooks/commit-msg deleted file mode 100755 index d0fb42e4..00000000 --- a/build/hooks/commit-msg +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash - -# get the commit message -commit_msg=$(<"${1:-}") - -b5 comlipy "$commit_msg" \ No newline at end of file diff --git a/build/hooks/pre-commit b/build/hooks/pre-commit deleted file mode 100755 index e8a0de65..00000000 --- a/build/hooks/pre-commit +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh -# Called by "git commit" with no arguments. The hook should -# exit with non-zero status after issuing an appropriate message if -# it wants to stop the commit. - -set -o errexit - -echo ############################################ -echo Starting git hooks -echo ############################################ - -for s in ./build/hooks/pre-commit.d/*.sh; do - . 
"./$s" -done - -echo ############################################ -echo Finished git hooks -echo ############################################ \ No newline at end of file diff --git a/build/hooks/pre-commit.d/10-flake8.sh b/build/hooks/pre-commit.d/10-flake8.sh deleted file mode 100644 index 237a5180..00000000 --- a/build/hooks/pre-commit.d/10-flake8.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh - -python_changed=0 -FILE_PATTERN=\.py$ - -git diff --cached --name-only | grep -q $FILE_PATTERN && python_changed=1 - -if [ $python_changed = 1 ]; then - b5 python:lint -else - echo "No python files in commit, skip python linting" -fi \ No newline at end of file diff --git a/build/hooks/pre-commit.d/20-markdown.sh b/build/hooks/pre-commit.d/20-markdown.sh deleted file mode 100644 index 13a93af0..00000000 --- a/build/hooks/pre-commit.d/20-markdown.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh - -markdown_changed=0 -FILE_PATTERN=\.md$ - -git diff --cached --name-only | grep -q $FILE_PATTERN && markdown_changed=1 - -if [ $markdown_changed = 1 ]; then - b5 lint -else - echo "No markdown files in commit, skip markdown linting" -fi \ No newline at end of file diff --git a/build/tasks/ros.sh b/build/tasks/ros.sh deleted file mode 100644 index bea9d749..00000000 --- a/build/tasks/ros.sh +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env bash -# b5 Taskfile, see https://git.team23.de/build/b5 for details - -# shortcuts for commands documented here -# http://wiki.ros.org/ROS/CommandLineTools#Common_user_tools - -task:roscommand() { - # seems necessary to source the setup file on every call - docker:container_run agent /bin/bash -c "source /opt/ros/noetic/setup.bash && ${@}" -} - -task:rosbag() { - task:roscommand "rosbag ${@}" -} - -task:ros_readbagfile() { - task:roscommand "ros_readbagfile ${@}" -} - -task:rosbash() { - task:roscommand "rosbash ${@}" -} - -task:roscd() { - task:roscommand "roscd ${@}" -} - -task:rosclean() { - task:roscommand "rosclean ${@}" -} - -task:roscore() { - task:roscommand "roscore ${@}" -} - -task:rosdep() { - task:roscommand "rosdep ${@}" -} - -task:rosed() { - task:roscommand "rosed ${@}" -} - -task:roscreate-pkg() { - task:roscommand "roscreate-pkg ${@}" -} - -task:roscreate-stack() { - task:roscommand "roscreate-stack ${@}" -} - -task:rosrun() { - task:roscommand "rosrun ${@}" -} - -task:roslaunch() { - task:roscommand "roslaunch ${@}" -} - -task:roslocate() { - task:roscommand "roslocate ${@}" -} - -task:rosmake() { - task:roscommand "rosmake ${@}" -} - -task:rosmsg() { - task:roscommand "rosmsg ${@}" -} - -task:rosnode() { - additionalArguments="${@:1}" - task:roscommand "rosnode ${@}" -} - -task:rospack() { - task:roscommand "rospack ${@}" -} - -task:rosparam() { - task:roscommand "rosparam ${@}" -} - -task:rossrv() { - task:roscommand "rossrv ${@}" -} - -task:rosservice() { - task:roscommand "rosservice ${@}" -} - -task:rosstack() { - task:roscommand "rosstack ${@}" -} - -task:rostopic() { - task:roscommand "rostopic ${@}" -} - -task:rosversion() { - task:roscommand "rosversion ${@}" -} -task:rqt_graph() { - task:roscommand "rqt_graph ${@}" -} - -task:rqt_plot() { - task:roscommand "rqt_plot ${@}" -} - -task:rqt_topic() { - task:roscommand "rqt_topic ${@}" -} diff --git a/code/__init__.py b/code/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/code/acting/readme.md b/code/acting/readme.md deleted file mode 100644 index abc85d83..00000000 --- a/code/acting/readme.md +++ /dev/null @@ -1,67 +0,0 @@ -# Acting - -**Summary:** This package contains all 
functions implemented for the acting component. - ---- - -## Authors - -Alexander Hellmann - -## Date - -01.04.2024 - ---- - - -- [Acting](#acting) - - [Authors](#authors) - - [Date](#date) - - [Acting Documentation](#acting-documentation) - - [Test/Debug/Tune Acting-Components](#testdebugtune-acting-components) - - [Longitudinal controllers (Velocity Controller)](#longitudinal-controllers-velocity-controller) - - [Lateral controllers (Steering Controllers)](#lateral-controllers-steering-controllers) - - [Vehicle controller](#vehicle-controller) - - [Visualization of the HeroFrame in rviz](#visualization-of-the-heroframe-in-rviz) - - -## Acting Documentation - -In order to further understand the general idea of the taken approach to the acting component please refer to the documentation of the [research](../../doc/03_research/01_acting/Readme.md) done and see the planned [general definition](../../doc/01_general/04_architecture.md#acting). - -It is also highly recommended to go through the indepth [Acting-Documentation](../../doc/05_acting/Readme.md)! - -## Test/Debug/Tune Acting-Components - -The Acting_Debug_Node can be used as a simulated Planning package, publishing adjustable target velocities, steerings and trajectories as needed. - -For more information about this node and how to use it, please read the [documentation](../../doc/05_acting/05_acting_testing.md). -You can also find more information in the commented [code](./src/acting/Acting_Debug_Node.py). - -## Longitudinal controllers (Velocity Controller) - -The longitudinal controller is implemented as a PID velocity controller. - -For more information about this controller, either read the [documentation](../../doc/05_acting/02_velocity_controller.md) or go through the commented [code](./src/acting/velocity_controller.py). - -## Lateral controllers (Steering Controllers) - -There are two steering controllers currently implemented, both providing live telemetry via Debug-Messages: - -- Pure Persuit Controller (paf/hero/pure_p_debug) -- Stanley Controller (paf/hero/stanley_debug) - -For further information about the steering controllers, either read the [documentation](./../../doc/05_acting/03_steering_controllers.md) or go through the commented code of [stanley_controller](./src/acting/stanley_controller.py) or [purepursuit_controller](./src/acting/pure_pursuit_controller.py). - -## Vehicle controller - -The VehicleController collects all necessary msgs from the other controllers and publishes the [CarlaEgoVehicleControl](https://carla.readthedocs.io/en/0.9.8/ros_msgs/#carlaegovehiclecontrol) for the [Carla ros bridge](https://github.com/carla-simulator/ros-bridge). - -It also executes emergency-brakes and the unstuck-routine, if detected. - -For more information about this controller, either read the [documentation](../../doc/05_acting/04_vehicle_controller.md) or go through the commented [code](./src/acting/vehicle_controller.py). 
- -## Visualization of the HeroFrame in rviz - -For information about vizualizing the upcomming path in rviz see [Main frame publisher](../../doc/05_acting/06_main_frame_publisher.md) diff --git a/code/acting/setup.py b/code/acting/setup.py index e2665b1f..773f1357 100644 --- a/code/acting/setup.py +++ b/code/acting/setup.py @@ -2,6 +2,5 @@ from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -setup_args = generate_distutils_setup(packages=['acting'], - package_dir={'': 'src'}) +setup_args = generate_distutils_setup(packages=["acting"], package_dir={"": "src"}) setup(**setup_args) diff --git a/code/acting/src/acting/Acting_Debug_Node.py b/code/acting/src/acting/Acting_Debug_Node.py index 99839e18..b3289747 100755 --- a/code/acting/src/acting/Acting_Debug_Node.py +++ b/code/acting/src/acting/Acting_Debug_Node.py @@ -71,35 +71,33 @@ def __init__(self): Constructor of the class :return: """ - super(Acting_Debug_Node, self).__init__('dummy_trajectory_pub') - self.loginfo('Acting_Debug_Node node started') - self.role_name = self.get_param('role_name', 'ego_vehicle') - self.control_loop_rate = self.get_param('control_loop_rate', 0.05) + super(Acting_Debug_Node, self).__init__("dummy_trajectory_pub") + self.loginfo("Acting_Debug_Node node started") + self.role_name = self.get_param("role_name", "ego_vehicle") + self.control_loop_rate = self.get_param("control_loop_rate", 0.05) # Publisher for Dummy Trajectory self.trajectory_pub: Publisher = self.new_publisher( - Path, - "/paf/" + self.role_name + "/trajectory", - qos_profile=1) + Path, "/paf/" + self.role_name + "/trajectory", qos_profile=1 + ) # Publisher for Dummy Velocity self.velocity_pub: Publisher = self.new_publisher( - Float32, - f"/paf/{self.role_name}/target_velocity", - qos_profile=1) + Float32, f"/paf/{self.role_name}/target_velocity", qos_profile=1 + ) # PurePursuit: Publisher for Dummy PP-Steer self.pure_pursuit_steer_pub: Publisher = self.new_publisher( - Float32, - f"/paf/{self.role_name}/pure_pursuit_steer", - qos_profile=1) + Float32, f"/paf/{self.role_name}/pure_pursuit_steer", qos_profile=1 + ) # Subscriber of current_pos, used for Steering Debugging self.current_pos_sub: Subscriber = self.new_subscription( msg_type=PoseStamped, topic="/paf/" + self.role_name + "/current_pos", callback=self.__current_position_callback, - qos_profile=1) + qos_profile=1, + ) # ---> EVALUATION/TUNING: Subscribers for plotting # Subscriber for target_velocity for plotting @@ -107,55 +105,61 @@ def __init__(self): Float32, f"/paf/{self.role_name}/target_velocity", self.__get_target_velocity, - qos_profile=1) + qos_profile=1, + ) # Subscriber for current_heading self.heading_sub: Subscriber = self.new_subscription( Float32, f"/paf/{self.role_name}/current_heading", self.__get_heading, - qos_profile=1) + qos_profile=1, + ) # Subscriber for current_velocity self.current_velocity_sub: Subscriber = self.new_subscription( CarlaSpeedometer, f"/carla/{self.role_name}/Speed", self.__get_current_velocity, - qos_profile=1) + qos_profile=1, + ) # Subscriber for current_throttle self.current_throttle_sub: Subscriber = self.new_subscription( Float32, f"/paf/{self.role_name}/throttle", self.__get_throttle, - qos_profile=1) + qos_profile=1, + ) # Subscriber for Stanley_steer self.stanley_steer_sub: Subscriber = self.new_subscription( Float32, f"/paf/{self.role_name}/stanley_steer", self.__get_stanley_steer, - qos_profile=1) + qos_profile=1, + ) # Subscriber for PurePursuit_steer self.pure_pursuit_steer_sub: Subscriber = 
self.new_subscription( Float32, f"/paf/{self.role_name}/pure_pursuit_steer", self.__get_purepursuit_steer, - qos_profile=1) + qos_profile=1, + ) # Subscriber for vehicle_steer self.vehicle_steer_sub: Subscriber = self.new_subscription( CarlaEgoVehicleControl, - f'/carla/{self.role_name}/vehicle_control_cmd', + f"/carla/{self.role_name}/vehicle_control_cmd", self.__get_vehicle_steer, - qos_profile=10) + qos_profile=10, + ) # Publisher for emergency brake testing self.emergency_pub: Publisher = self.new_publisher( - Bool, - f"/paf/{self.role_name}/emergency", - qos_profile=1) + Bool, f"/paf/{self.role_name}/emergency", qos_profile=1 + ) # Initialize all needed "global" variables here self.current_trajectory = [] @@ -181,16 +185,12 @@ def __init__(self): # Spawncoords at the simulationstart startx = 984.5 starty = -5442.0 - if (TRAJECTORY_TYPE == 0): # Straight trajectory - self.current_trajectory = [ - (startx, starty), - (startx, starty-200) - ] + if TRAJECTORY_TYPE == 0: # Straight trajectory + self.current_trajectory = [(startx, starty), (startx, starty - 200)] - elif (TRAJECTORY_TYPE == 1): # straight into 90° Curve + elif TRAJECTORY_TYPE == 1: # straight into 90° Curve self.current_trajectory = [ (984.5, -5442.0), - (984.5, -5563.5), (985.0, -5573.2), (986.3, -5576.5), @@ -198,12 +198,11 @@ def __init__(self): (988.7, -5579.0), (990.5, -5579.8), (1000.0, -5580.2), - (1040.0, -5580.0), - (1070.0, -5580.0) + (1070.0, -5580.0), ] - elif (TRAJECTORY_TYPE == 2): # Sinewave Serpentines trajectory + elif TRAJECTORY_TYPE == 2: # Sinewave Serpentines trajectory # Generate a sine-wave with the global Constants to # automatically generate a trajectory with serpentine waves cycles = 4 # how many sine cycles @@ -224,53 +223,50 @@ def __init__(self): traj_y -= 2 trajectory_wave.append((traj_x, traj_y)) # back to the middle of the road - trajectory_wave.append((startx, traj_y-2)) + trajectory_wave.append((startx, traj_y - 2)) # add a long straight path after the serpentines - trajectory_wave.append((startx, starty-200)) + trajectory_wave.append((startx, starty - 200)) self.current_trajectory = trajectory_wave - elif (TRAJECTORY_TYPE == 3): # 2 Lane Switches + elif TRAJECTORY_TYPE == 3: # 2 Lane Switches self.current_trajectory = [ (startx, starty), - (startx-0.5, starty-10), - (startx-0.5, starty-20), - - (startx-0.4, starty-21), - (startx-0.3, starty-22), - (startx-0.2, starty-23), - (startx-0.1, starty-24), - (startx, starty-25), - (startx+0.1, starty-26), - (startx+0.2, starty-27), - (startx+0.3, starty-28), - (startx+0.4, starty-29), - (startx+0.5, starty-30), - (startx+0.6, starty-31), - (startx+0.7, starty-32), - (startx+0.8, starty-33), - (startx+0.9, starty-34), - (startx+1.0, starty-35), - (startx+1.0, starty-50), - - (startx+1.0, starty-51), - (startx+0.9, starty-52), - (startx+0.8, starty-53), - (startx+0.7, starty-54), - (startx+0.6, starty-55), - (startx+0.5, starty-56), - (startx+0.4, starty-57), - (startx+0.3, starty-58), - (startx+0.2, starty-59), - (startx+0.1, starty-60), - (startx, starty-61), - (startx-0.1, starty-62), - (startx-0.2, starty-63), - (startx-0.3, starty-64), - (startx-0.4, starty-65), - (startx-0.5, starty-66), - - (startx-0.5, starty-100), - ] + (startx - 0.5, starty - 10), + (startx - 0.5, starty - 20), + (startx - 0.4, starty - 21), + (startx - 0.3, starty - 22), + (startx - 0.2, starty - 23), + (startx - 0.1, starty - 24), + (startx, starty - 25), + (startx + 0.1, starty - 26), + (startx + 0.2, starty - 27), + (startx + 0.3, starty - 28), + (startx + 0.4, starty - 
29), + (startx + 0.5, starty - 30), + (startx + 0.6, starty - 31), + (startx + 0.7, starty - 32), + (startx + 0.8, starty - 33), + (startx + 0.9, starty - 34), + (startx + 1.0, starty - 35), + (startx + 1.0, starty - 50), + (startx + 1.0, starty - 51), + (startx + 0.9, starty - 52), + (startx + 0.8, starty - 53), + (startx + 0.7, starty - 54), + (startx + 0.6, starty - 55), + (startx + 0.5, starty - 56), + (startx + 0.4, starty - 57), + (startx + 0.3, starty - 58), + (startx + 0.2, starty - 59), + (startx + 0.1, starty - 60), + (startx, starty - 61), + (startx - 0.1, starty - 62), + (startx - 0.2, starty - 63), + (startx - 0.3, starty - 64), + (startx - 0.4, starty - 65), + (startx - 0.5, starty - 66), + (startx - 0.5, starty - 100), + ] self.updated_trajectory(self.current_trajectory) def updated_trajectory(self, target_trajectory): @@ -347,21 +343,21 @@ def loop(timer_event=None): depending on the selected TEST_TYPE """ # Drive const. velocity on fixed straight steering - if (TEST_TYPE == 0): + if TEST_TYPE == 0: self.driveVel = TARGET_VELOCITY_1 self.pure_pursuit_steer_pub.publish(FIXED_STEERING) self.velocity_pub.publish(self.driveVel) # Drive alternating velocities on fixed straight steering - elif (TEST_TYPE == 1): + elif TEST_TYPE == 1: if not self.time_set: self.drive_Vel = TARGET_VELOCITY_1 self.switch_checkpoint_time = rospy.get_time() self.switch_time_set = True - if (self.switch_checkpoint_time < rospy.get_time() - 10): + if self.switch_checkpoint_time < rospy.get_time() - 10: self.switch_checkpoint_time = rospy.get_time() self.switchVelocity = not self.switchVelocity - if (self.switchVelocity): + if self.switchVelocity: self.driveVel = TARGET_VELOCITY_2 else: self.driveVel = TARGET_VELOCITY_1 @@ -369,7 +365,7 @@ def loop(timer_event=None): self.velocity_pub.publish(self.driveVel) # drive const. velocity on trajectoy with steering controller - elif (TEST_TYPE == 2): + elif TEST_TYPE == 2: # Continuously update path and publish it self.drive_Vel = TARGET_VELOCITY_1 self.updated_trajectory(self.current_trajectory) @@ -378,13 +374,13 @@ def loop(timer_event=None): # drive const. velocity on fixed straight steering and # trigger an emergency brake after 15 secs - elif (TEST_TYPE == 3): + elif TEST_TYPE == 3: # Continuously update path and publish it self.drive_Vel = TARGET_VELOCITY_1 if not self.time_set: self.checkpoint_time = rospy.get_time() self.time_set = True - if (self.checkpoint_time < rospy.get_time() - 15.0): + if self.checkpoint_time < rospy.get_time() - 15.0: self.checkpoint_time = rospy.get_time() self.emergency_pub.publish(True) self.pure_pursuit_steer_pub.publish(FIXED_STEERING) @@ -402,7 +398,7 @@ def loop(timer_event=None): print(">>>>>>>>>>>> TRAJECTORY <<<<<<<<<<<<<<") # Uncomment the prints of the data you want to plot - if (self.checkpoint_time < rospy.get_time() - PRINT_AFTER_TIME): + if self.checkpoint_time < rospy.get_time() - PRINT_AFTER_TIME: self.checkpoint_time = rospy.get_time() print(">>>>>>>>>>>> DATA <<<<<<<<<<<<<<") if PRINT_VELOCITY_DATA: @@ -420,6 +416,7 @@ def loop(timer_event=None): print(">> ACTUAL POSITIONS <<") print(self.positions) print(">>>>>>>>>>>> DATA <<<<<<<<<<<<<<") + self.new_timer(self.control_loop_rate, loop) self.spin() diff --git a/code/acting/src/acting/MainFramePublisher.py b/code/acting/src/acting/MainFramePublisher.py index 0ba4783d..a34240e8 100755 --- a/code/acting/src/acting/MainFramePublisher.py +++ b/code/acting/src/acting/MainFramePublisher.py @@ -19,11 +19,11 @@ def __init__(self): ego vehicle does. 
The hero frame is used by sensors like the lidar. Rviz also uses the hero frame. The main frame is used for planning. """ - super(MainFramePublisher, self).__init__('main_frame_publisher') - self.loginfo('MainFramePublisher node started') + super(MainFramePublisher, self).__init__("main_frame_publisher") + self.loginfo("MainFramePublisher node started") - self.control_loop_rate = self.get_param('control_loop_rate', 0.05) - self.role_name = self.get_param('role_name', 'ego_vehicle') + self.control_loop_rate = self.get_param("control_loop_rate", 0.05) + self.role_name = self.get_param("role_name", "ego_vehicle") self.current_pos: PoseStamped = PoseStamped() self.current_heading: float = 0 @@ -31,16 +31,18 @@ def __init__(self): PoseStamped, "/paf/" + self.role_name + "/current_pos", self.get_current_pos, - qos_profile=1) + qos_profile=1, + ) self.current_heading_subscriber = self.new_subscription( Float32, f"/paf/{self.role_name}/current_heading", self.get_current_heading, - qos_profile=1) + qos_profile=1, + ) def run(self): - self.loginfo('MainFramePublisher node running') + self.loginfo("MainFramePublisher node running") br = tf.TransformBroadcaster() def loop(timer_event=None): @@ -49,22 +51,26 @@ def loop(timer_event=None): return rot = -self.current_heading pos = [0, 0, 0] - pos[0] = cos(rot) * \ - self.current_pos.pose.position.x - \ - sin(rot) * self.current_pos.pose.position.y - pos[1] = sin(rot) * \ - self.current_pos.pose.position.x + \ - cos(rot) * self.current_pos.pose.position.y + pos[0] = ( + cos(rot) * self.current_pos.pose.position.x + - sin(rot) * self.current_pos.pose.position.y + ) + pos[1] = ( + sin(rot) * self.current_pos.pose.position.x + + cos(rot) * self.current_pos.pose.position.y + ) pos[2] = -self.current_pos.pose.position.z - rot_quat = R.from_euler("xyz", [0, 0, -self.current_heading+pi], - degrees=False).as_quat() - - br.sendTransform(pos, - rot_quat, - rospy.Time.now(), - "global", - "hero", - ) + rot_quat = R.from_euler( + "xyz", [0, 0, -self.current_heading + pi], degrees=False + ).as_quat() + + br.sendTransform( + pos, + rot_quat, + rospy.Time.now(), + "global", + "hero", + ) self.new_timer(self.control_loop_rate, loop) self.spin() @@ -81,7 +87,7 @@ def main(args=None): Main function starts the node :param args: """ - roscomp.init('main_frame_publisher', args=args) + roscomp.init("main_frame_publisher", args=args) try: node = MainFramePublisher() @@ -92,5 +98,5 @@ def main(args=None): roscomp.shutdown() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/code/acting/src/acting/helper_functions.py b/code/acting/src/acting/helper_functions.py index 072e9aa2..8dc2c6d8 100755 --- a/code/acting/src/acting/helper_functions.py +++ b/code/acting/src/acting/helper_functions.py @@ -106,10 +106,10 @@ def calc_path_yaw(path: Path, idx: int) -> float: point_current = path.poses[idx] point_next: PoseStamped point_next = path.poses[idx + 1] - angle = math.atan2(point_next.pose.position.y - - point_current.pose.position.y, - point_next.pose.position.x - - point_current.pose.position.x) + angle = math.atan2( + point_next.pose.position.y - point_current.pose.position.y, + point_next.pose.position.x - point_current.pose.position.x, + ) return normalize_angle(angle) @@ -134,14 +134,19 @@ def calc_egocar_yaw(pose: PoseStamped) -> float: :param pose: The current pose of the ego vehicle :return: normalized yaw of the vehicle """ - quaternion = (pose.pose.orientation.x, pose.pose.orientation.y, - pose.pose.orientation.z, pose.pose.orientation.w) + quaternion 
= ( + pose.pose.orientation.x, + pose.pose.orientation.y, + pose.pose.orientation.z, + pose.pose.orientation.w, + ) _, _, yaw = euler_from_quaternion(quaternion) return normalize_angle(yaw) -def points_to_vector(p_1: Tuple[float, float], - p_2: Tuple[float, float]) -> Tuple[float, float]: +def points_to_vector( + p_1: Tuple[float, float], p_2: Tuple[float, float] +) -> Tuple[float, float]: """ Create the vector starting at p1 and ending at p2 :param p_1: Start point @@ -157,11 +162,12 @@ def vector_len(vec: Tuple[float, float]) -> float: :param vec: vector v as a tuple (x, y) :return: length of vector v """ - return sqrt(vec[0]**2 + vec[1]**2) + return sqrt(vec[0] ** 2 + vec[1] ** 2) -def add_vector(v_1: Tuple[float, float], - v_2: Tuple[float, float]) -> Tuple[float, float]: +def add_vector( + v_1: Tuple[float, float], v_2: Tuple[float, float] +) -> Tuple[float, float]: """ Add the two given vectors :param v_1: first vector @@ -172,20 +178,22 @@ def add_vector(v_1: Tuple[float, float], return v_1[0] + v_2[0], v_1[1] + v_2[1] -def rotate_vector(vector: Tuple[float, float], - angle_rad: float) -> Tuple[float, float]: +def rotate_vector(vector: Tuple[float, float], angle_rad: float) -> Tuple[float, float]: """ Rotate the given vector by an angle :param vector: vector :param angle_rad: angle of rotation :return: rotated angle """ - return (cos(angle_rad) * vector[0] - sin(angle_rad) * vector[1], - sin(angle_rad) * vector[0] + cos(angle_rad) * vector[1]) + return ( + cos(angle_rad) * vector[0] - sin(angle_rad) * vector[1], + sin(angle_rad) * vector[0] + cos(angle_rad) * vector[1], + ) -def linear_interpolation(start: Tuple[float, float], end: Tuple[float, float], - interval_m: float) -> List[Tuple[float, float]]: +def linear_interpolation( + start: Tuple[float, float], end: Tuple[float, float], interval_m: float +) -> List[Tuple[float, float]]: """ Interpolate linearly between start and end, with a minimal distance of interval_m between points. @@ -200,21 +208,23 @@ def linear_interpolation(start: Tuple[float, float], end: Tuple[float, float], steps = max(1, floor(distance / interval_m)) exceeds_interval_cap = distance > interval_m - step_vector = (vector[0] / steps if exceeds_interval_cap else vector[0], - vector[1] / steps if exceeds_interval_cap else vector[1]) + step_vector = ( + vector[0] / steps if exceeds_interval_cap else vector[0], + vector[1] / steps if exceeds_interval_cap else vector[1], + ) lin_points = [(start[0], start[1])] for i in range(1, steps): lin_points.append( - (start[0] + step_vector[0] * i, - start[1] + step_vector[1] * i) + (start[0] + step_vector[0] * i, start[1] + step_vector[1] * i) ) return lin_points -def _clean_route_duplicates(route: List[Tuple[float, float]], - min_dist: float) -> List[Tuple[float, float]]: +def _clean_route_duplicates( + route: List[Tuple[float, float]], min_dist: float +) -> List[Tuple[float, float]]: """ Remove duplicates in the given List of tuples, if the distance between them is less than min_dist. 
@@ -243,8 +253,9 @@ def interpolate_route(orig_route: List[Tuple[float, float]], interval_m=0.5): orig_route = _clean_route_duplicates(orig_route, 0.1) route = [] for index in range(len(orig_route) - 1): - waypoints = linear_interpolation(orig_route[index], - orig_route[index + 1], interval_m) + waypoints = linear_interpolation( + orig_route[index], orig_route[index + 1], interval_m + ) route.extend(waypoints) route = route + [orig_route[-1]] diff --git a/code/acting/src/acting/pure_pursuit_controller.py b/code/acting/src/acting/pure_pursuit_controller.py index 04740418..4259fe78 100755 --- a/code/acting/src/acting/pure_pursuit_controller.py +++ b/code/acting/src/acting/pure_pursuit_controller.py @@ -26,45 +26,44 @@ class PurePursuitController(CompatibleNode): def __init__(self): - super(PurePursuitController, self).__init__('pure_pursuit_controller') - self.loginfo('PurePursuitController node started') + super(PurePursuitController, self).__init__("pure_pursuit_controller") + self.loginfo("PurePursuitController node started") - self.control_loop_rate = self.get_param('control_loop_rate', 0.05) - self.role_name = self.get_param('role_name', 'ego_vehicle') + self.control_loop_rate = self.get_param("control_loop_rate", 0.05) + self.role_name = self.get_param("role_name", "ego_vehicle") self.position_sub: Subscriber = self.new_subscription( - Path, - f"/paf/{self.role_name}/trajectory", - self.__set_path, - qos_profile=1) + Path, f"/paf/{self.role_name}/trajectory", self.__set_path, qos_profile=1 + ) self.path_sub: Subscriber = self.new_subscription( PoseStamped, f"/paf/{self.role_name}/current_pos", self.__set_position, - qos_profile=1) + qos_profile=1, + ) self.velocity_sub: Subscriber = self.new_subscription( CarlaSpeedometer, f"/carla/{self.role_name}/Speed", self.__set_velocity, - qos_profile=1) + qos_profile=1, + ) self.heading_sub: Subscriber = self.new_subscription( Float32, f"/paf/{self.role_name}/current_heading", self.__set_heading, - qos_profile=1) + qos_profile=1, + ) self.pure_pursuit_steer_pub: Publisher = self.new_publisher( - Float32, - f"/paf/{self.role_name}/pure_pursuit_steer", - qos_profile=1) + Float32, f"/paf/{self.role_name}/pure_pursuit_steer", qos_profile=1 + ) self.debug_msg_pub: Publisher = self.new_publisher( - Debug, - f"/paf/{self.role_name}/pure_p_debug", - qos_profile=1) + Debug, f"/paf/{self.role_name}/pure_p_debug", qos_profile=1 + ) self.__position: tuple[float, float] = None # x, y self.__path: Path = None @@ -77,7 +76,7 @@ def run(self): Starts the main loop of the node :return: """ - self.loginfo('PurePursuitController node running') + self.loginfo("PurePursuitController node running") def loop(timer_event=None): """ @@ -86,26 +85,34 @@ def loop(timer_event=None): :return: """ if self.__path is None: - self.logdebug("PurePursuitController hasn't received a path " - "yet and can therefore not publish steering") + self.logdebug( + "PurePursuitController hasn't received a path " + "yet and can therefore not publish steering" + ) return if self.__position is None: - self.logdebug("PurePursuitController hasn't received the " - "position of the vehicle yet " - "and can therefore not publish steering") + self.logdebug( + "PurePursuitController hasn't received the " + "position of the vehicle yet " + "and can therefore not publish steering" + ) return if self.__heading is None: - self.logdebug("PurePursuitController hasn't received the " - "heading of the vehicle yet and " - "can therefore not publish steering") + self.logdebug( + "PurePursuitController hasn't 
received the " + "heading of the vehicle yet and " + "can therefore not publish steering" + ) return if self.__velocity is None: - self.logdebug("PurePursuitController hasn't received the " - "velocity of the vehicle yet " - "and can therefore not publish steering") + self.logdebug( + "PurePursuitController hasn't received the " + "velocity of the vehicle yet " + "and can therefore not publish steering" + ) return self.pure_pursuit_steer_pub.publish(self.__calculate_steer()) @@ -119,16 +126,17 @@ def __calculate_steer(self) -> float: :return: """ # la_dist = MIN_LA_DISTANCE <= K_LAD * velocity <= MAX_LA_DISTANCE - look_ahead_dist = np.clip(K_LAD * self.__velocity, - MIN_LA_DISTANCE, MAX_LA_DISTANCE) + look_ahead_dist = np.clip( + K_LAD * self.__velocity, MIN_LA_DISTANCE, MAX_LA_DISTANCE + ) # Get the target position on the trajectory in look_ahead distance self.__tp_idx = self.__get_target_point_index(look_ahead_dist) target_wp: PoseStamped = self.__path.poses[self.__tp_idx] # Get the vector from the current position to the target position - target_v_x, target_v_y = points_to_vector((self.__position[0], - self.__position[1]), - (target_wp.pose.position.x, - target_wp.pose.position.y)) + target_v_x, target_v_y = points_to_vector( + (self.__position[0], self.__position[1]), + (target_wp.pose.position.x, target_wp.pose.position.y), + ) # Get the target heading from that vector target_vector_heading = vector_angle(target_v_x, target_v_y) # Get the error between current heading and target heading @@ -181,7 +189,7 @@ def __dist_to(self, pos: Point) -> float: y_current = self.__position[1] x_target = pos.x y_target = pos.y - d = (x_target - x_current)**2 + (y_target - y_current)**2 + d = (x_target - x_current) ** 2 + (y_target - y_current) ** 2 return math.sqrt(d) def __set_position(self, data: PoseStamped, min_diff=0.001): @@ -206,9 +214,11 @@ def __set_position(self, data: PoseStamped, min_diff=0.001): # if new position is to close to current, do not accept it # too close = closer than min_diff = 0.001 meters # for debugging purposes: - self.logdebug("New position disregarded, " - f"as dist ({round(dist, 3)}) to current pos " - f"< min_diff ({round(min_diff, 3)})") + self.logdebug( + "New position disregarded, " + f"as dist ({round(dist, 3)}) to current pos " + f"< min_diff ({round(min_diff, 3)})" + ) return new_x = data.pose.position.x new_y = data.pose.position.y @@ -230,10 +240,10 @@ def __set_velocity(self, data: CarlaSpeedometer): def main(args=None): """ - main function starts the pure pursuit controller node - :param args: + main function starts the pure pursuit controller node + :param args: """ - roscomp.init('pure_pursuit_controller', args=args) + roscomp.init("pure_pursuit_controller", args=args) try: node = PurePursuitController() @@ -244,5 +254,5 @@ def main(args=None): roscomp.shutdown() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/code/acting/src/acting/stanley_controller.py b/code/acting/src/acting/stanley_controller.py index e0cbc190..3463e90e 100755 --- a/code/acting/src/acting/stanley_controller.py +++ b/code/acting/src/acting/stanley_controller.py @@ -20,46 +20,45 @@ class StanleyController(CompatibleNode): def __init__(self): - super(StanleyController, self).__init__('stanley_controller') - self.loginfo('StanleyController node started') + super(StanleyController, self).__init__("stanley_controller") + self.loginfo("StanleyController node started") - self.control_loop_rate = self.get_param('control_loop_rate', 0.05) - self.role_name = 
self.get_param('role_name', 'ego_vehicle') + self.control_loop_rate = self.get_param("control_loop_rate", 0.05) + self.role_name = self.get_param("role_name", "ego_vehicle") # Subscribers self.position_sub: Subscriber = self.new_subscription( - Path, - f"/paf/{self.role_name}/trajectory", - self.__set_path, - qos_profile=1) + Path, f"/paf/{self.role_name}/trajectory", self.__set_path, qos_profile=1 + ) self.path_sub: Subscriber = self.new_subscription( PoseStamped, f"/paf/{self.role_name}/current_pos", self.__set_position, - qos_profile=1) + qos_profile=1, + ) self.velocity_sub: Subscriber = self.new_subscription( CarlaSpeedometer, f"/carla/{self.role_name}/Speed", self.__set_velocity, - qos_profile=1) + qos_profile=1, + ) self.heading_sub: Subscriber = self.new_subscription( Float32, f"/paf/{self.role_name}/current_heading", self.__set_heading, - qos_profile=1) + qos_profile=1, + ) self.stanley_steer_pub: Publisher = self.new_publisher( - Float32, - f"/paf/{self.role_name}/stanley_steer", - qos_profile=1) + Float32, f"/paf/{self.role_name}/stanley_steer", qos_profile=1 + ) self.debug_publisher: Publisher = self.new_publisher( - StanleyDebug, - f"/paf/{self.role_name}/stanley_debug", - qos_profile=1) + StanleyDebug, f"/paf/{self.role_name}/stanley_debug", qos_profile=1 + ) self.__position: tuple[float, float] = None # x , y self.__path: Path = None @@ -71,7 +70,7 @@ def run(self): Starts the main loop of the node :return: """ - self.loginfo('StanleyController node running') + self.loginfo("StanleyController node running") def loop(timer_event=None): """ @@ -80,25 +79,33 @@ def loop(timer_event=None): :return: """ if self.__path is None: - self.logwarn("StanleyController hasn't received a path yet " - "and can therefore not publish steering") + self.logwarn( + "StanleyController hasn't received a path yet " + "and can therefore not publish steering" + ) return if self.__position is None: - self.logwarn("StanleyController hasn't received the" - "position of the vehicle yet " - "and can therefore not publish steering") + self.logwarn( + "StanleyController hasn't received the" + "position of the vehicle yet " + "and can therefore not publish steering" + ) return if self.__heading is None: - self.logwarn("StanleyController hasn't received the" - "heading of the vehicle yet and" - "can therefore not publish steering") + self.logwarn( + "StanleyController hasn't received the" + "heading of the vehicle yet and" + "can therefore not publish steering" + ) return if self.__velocity is None: - self.logwarn("StanleyController hasn't received the " - "velocity of the vehicle yet " - "and can therefore not publish steering") + self.logwarn( + "StanleyController hasn't received the " + "velocity of the vehicle yet " + "and can therefore not publish steering" + ) return self.stanley_steer_pub.publish(self.__calculate_steer()) @@ -125,8 +132,9 @@ def __calculate_steer(self) -> float: closest_point: PoseStamped = self.__path.poses[closest_point_idx] cross_err = self.__get_cross_err(closest_point.pose.position) # * -1 because it is inverted compared to PurePursuit - steering_angle = 1 * (heading_err + atan((K_CROSSERR * cross_err) - / current_velocity)) + steering_angle = 1 * ( + heading_err + atan((K_CROSSERR * cross_err) / current_velocity) + ) # -> for debugging debug_msg = StanleyDebug() debug_msg.heading = self.__heading @@ -174,10 +182,11 @@ def __get_path_heading(self, index: int) -> float: if index > 0: # Calculate heading from the previous point on the trajectory - prv_point: Point = 
self.__path.poses[index-1].pose.position + prv_point: Point = self.__path.poses[index - 1].pose.position - prv_v_x, prv_v_y = points_to_vector((prv_point.x, prv_point.y), - (cur_pos.x, cur_pos.y)) + prv_v_x, prv_v_y = points_to_vector( + (prv_point.x, prv_point.y), (cur_pos.x, cur_pos.y) + ) heading_sum += vector_angle(prv_v_x, prv_v_y) heading_sum_args += 1 @@ -186,8 +195,9 @@ def __get_path_heading(self, index: int) -> float: # Calculate heading to the following point on the trajectory aft_point: Point = self.__path.poses[index + 1].pose.position - aft_v_x, aft_v_y = points_to_vector((aft_point.x, aft_point.y), - (cur_pos.x, cur_pos.y)) + aft_v_x, aft_v_y = points_to_vector( + (aft_point.x, aft_point.y), (cur_pos.x, cur_pos.y) + ) heading_sum += vector_angle(aft_v_x, aft_v_y) heading_sum_args += 1 @@ -210,8 +220,10 @@ def __get_cross_err(self, pos: Point) -> float: if self.__heading is not None: alpha = self.__heading + (math.pi / 2) v_e_0 = (0, 1) - v_e = (cos(alpha)*v_e_0[0] - sin(alpha)*v_e_0[1], - sin(alpha)*v_e_0[0] + cos(alpha)*v_e_0[1]) + v_e = ( + cos(alpha) * v_e_0[0] - sin(alpha) * v_e_0[1], + sin(alpha) * v_e_0[0] + cos(alpha) * v_e_0[1], + ) # define a vector (v_ab) with length 10 centered on the cur pos # of the vehicle, with a heading parallel to that of the vehicle @@ -221,8 +233,7 @@ def __get_cross_err(self, pos: Point) -> float: v_ab = (b[0] - a[0], b[1] - a[1]) v_am = (pos.x - a[0], pos.y - a[1]) - c = np.array([[v_ab[0], v_am[0]], - [v_ab[1], v_am[1]]]) + c = np.array([[v_ab[0], v_am[0]], [v_ab[1], v_am[1]]]) temp_sign = np.linalg.det(c) min_sign = 0.01 # to avoid rounding errors @@ -268,9 +279,11 @@ def __set_position(self, data: PoseStamped, min_diff=0.001): # check if the new position is valid dist = self.__dist_to(data.pose.position) if dist < min_diff: - self.logdebug("New position disregarded, " - f"as dist ({round(dist, 3)}) to current pos " - f"< min_diff ({round(min_diff, 3)})") + self.logdebug( + "New position disregarded, " + f"as dist ({round(dist, 3)}) to current pos " + f"< min_diff ({round(min_diff, 3)})" + ) return new_x = data.pose.position.x @@ -296,7 +309,7 @@ def main(args=None): Main function starts the node :param args: """ - roscomp.init('stanley_controller', args=args) + roscomp.init("stanley_controller", args=args) try: node = StanleyController() @@ -307,5 +320,5 @@ def main(args=None): roscomp.shutdown() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/code/acting/src/acting/vehicle_controller.py b/code/acting/src/acting/vehicle_controller.py index e97bc1c8..90aa0f68 100755 --- a/code/acting/src/acting/vehicle_controller.py +++ b/code/acting/src/acting/vehicle_controller.py @@ -21,41 +21,45 @@ class VehicleController(CompatibleNode): """ def __init__(self): - super(VehicleController, self).__init__('vehicle_controller') - self.loginfo('VehicleController node started') - self.control_loop_rate = self.get_param('control_loop_rate', 0.05) - self.role_name = self.get_param('role_name', 'ego_vehicle') + super(VehicleController, self).__init__("vehicle_controller") + self.loginfo("VehicleController node started") + self.control_loop_rate = self.get_param("control_loop_rate", 0.05) + self.role_name = self.get_param("role_name", "ego_vehicle") self.__curr_behavior = None # only unstuck behavior is relevant here # Publisher for Carla Vehicle Control Commands self.control_publisher: Publisher = self.new_publisher( CarlaEgoVehicleControl, - f'/carla/{self.role_name}/vehicle_control_cmd', - qos_profile=10) + 
f"/carla/{self.role_name}/vehicle_control_cmd", + qos_profile=10, + ) # Publisher for Status TODO: Maybe unneccessary self.status_pub: Publisher = self.new_publisher( Bool, f"/carla/{self.role_name}/status", qos_profile=QoSProfile( - depth=1, - durability=DurabilityPolicy.TRANSIENT_LOCAL)) + depth=1, durability=DurabilityPolicy.TRANSIENT_LOCAL + ), + ) # Publisher for which steering-controller is mainly used # 1 = PurePursuit and 2 = Stanley self.controller_pub: Publisher = self.new_publisher( Float32, f"/paf/{self.role_name}/controller", - qos_profile=QoSProfile(depth=10, - durability=DurabilityPolicy.TRANSIENT_LOCAL) + qos_profile=QoSProfile( + depth=10, durability=DurabilityPolicy.TRANSIENT_LOCAL + ), ) self.emergency_pub: Publisher = self.new_publisher( Bool, f"/paf/{self.role_name}/emergency", - qos_profile=QoSProfile(depth=10, - durability=DurabilityPolicy.TRANSIENT_LOCAL) + qos_profile=QoSProfile( + depth=10, durability=DurabilityPolicy.TRANSIENT_LOCAL + ), ) # Subscribers @@ -63,51 +67,53 @@ def __init__(self): String, f"/paf/{self.role_name}/curr_behavior", self.__set_curr_behavior, - qos_profile=1) + qos_profile=1, + ) self.emergency_sub: Subscriber = self.new_subscription( Bool, f"/paf/{self.role_name}/emergency", self.__set_emergency, - qos_profile=QoSProfile(depth=10, - durability=DurabilityPolicy.TRANSIENT_LOCAL) + qos_profile=QoSProfile( + depth=10, durability=DurabilityPolicy.TRANSIENT_LOCAL + ), ) self.velocity_sub: Subscriber = self.new_subscription( CarlaSpeedometer, f"/carla/{self.role_name}/Speed", self.__get_velocity, - qos_profile=1) + qos_profile=1, + ) self.throttle_sub: Subscriber = self.new_subscription( Float32, f"/paf/{self.role_name}/throttle", self.__set_throttle, - qos_profile=1) + qos_profile=1, + ) self.brake_sub: Subscriber = self.new_subscription( - Float32, - f"/paf/{self.role_name}/brake", - self.__set_brake, - qos_profile=1) + Float32, f"/paf/{self.role_name}/brake", self.__set_brake, qos_profile=1 + ) self.reverse_sub: Subscriber = self.new_subscription( - Bool, - f"/paf/{self.role_name}/reverse", - self.__set_reverse, - qos_profile=1) + Bool, f"/paf/{self.role_name}/reverse", self.__set_reverse, qos_profile=1 + ) self.pure_pursuit_steer_sub: Subscriber = self.new_subscription( Float32, f"/paf/{self.role_name}/pure_pursuit_steer", self.__set_pure_pursuit_steer, - qos_profile=1) + qos_profile=1, + ) self.stanley_sub: Subscriber = self.new_subscription( Float32, f"/paf/{self.role_name}/stanley_steer", self.__set_stanley_steer, - qos_profile=1) + qos_profile=1, + ) self.__reverse: bool = False self.__emergency: bool = False @@ -123,7 +129,7 @@ def run(self): :return: """ self.status_pub.publish(True) - self.loginfo('VehicleController node running') + self.loginfo("VehicleController node running") def loop(timer_event=None) -> None: """ @@ -143,8 +149,10 @@ def loop(timer_event=None) -> None: steer = self._s_steer else: # while doing the unstuck routine we don't want to steer - if self.__curr_behavior == "us_unstuck" or \ - self.__curr_behavior == "us_stop": + if ( + self.__curr_behavior == "us_unstuck" + or self.__curr_behavior == "us_stop" + ): steer = 0 else: steer = self._p_steer @@ -157,8 +165,7 @@ def loop(timer_event=None) -> None: message.throttle = self.__throttle message.brake = self.__brake message.steer = steer - message.header.stamp = roscomp.ros_timestamp(self.get_time(), - from_sec=True) + message.header.stamp = roscomp.ros_timestamp(self.get_time(), from_sec=True) self.control_publisher.publish(message) 
self.new_timer(self.control_loop_rate, loop) @@ -204,8 +211,7 @@ def __emergency_brake(self, active) -> None: message.reverse = True message.hand_brake = True message.manual_gear_shift = False - message.header.stamp = roscomp.ros_timestamp(self.get_time(), - from_sec=True) + message.header.stamp = roscomp.ros_timestamp(self.get_time(), from_sec=True) else: self.__emergency = False message.throttle = 0 @@ -214,8 +220,7 @@ def __emergency_brake(self, active) -> None: message.reverse = False message.hand_brake = False message.manual_gear_shift = False - message.header.stamp = roscomp.ros_timestamp(self.get_time(), - from_sec=True) + message.header.stamp = roscomp.ros_timestamp(self.get_time(), from_sec=True) self.control_publisher.publish(message) def __get_velocity(self, data: CarlaSpeedometer) -> None: @@ -230,11 +235,12 @@ def __get_velocity(self, data: CarlaSpeedometer) -> None: return if data.speed < 0.1: # vehicle has come to a stop self.__emergency_brake(False) - self.loginfo("Emergency breaking disengaged " - "(Emergency breaking has been executed successfully)") + self.loginfo( + "Emergency breaking disengaged " + "(Emergency breaking has been executed successfully)" + ) for _ in range(7): # publish 7 times just to be safe - self.emergency_pub.publish( - Bool(False)) + self.emergency_pub.publish(Bool(False)) def __set_throttle(self, data): self.__throttle = data.data @@ -246,12 +252,12 @@ def __set_reverse(self, data): self.__reverse = data.data def __set_pure_pursuit_steer(self, data: Float32): - r = (math.pi / 2) # convert from RAD to [-1;1] - self._p_steer = (data.data / r) + r = math.pi / 2 # convert from RAD to [-1;1] + self._p_steer = data.data / r def __set_stanley_steer(self, data: Float32): - r = (math.pi / 2) # convert from RAD to [-1;1] - self._s_steer = (data.data / r) + r = math.pi / 2 # convert from RAD to [-1;1] + self._s_steer = data.data / r def main(args=None): @@ -259,7 +265,7 @@ def main(args=None): Main function starts the node :param args: """ - roscomp.init('vehicle_controller', args=args) + roscomp.init("vehicle_controller", args=args) try: node = VehicleController() @@ -270,5 +276,5 @@ def main(args=None): roscomp.shutdown() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/code/acting/src/acting/velocity_controller.py b/code/acting/src/acting/velocity_controller.py index bf43ab41..db1f53aa 100755 --- a/code/acting/src/acting/velocity_controller.py +++ b/code/acting/src/acting/velocity_controller.py @@ -15,38 +15,37 @@ class VelocityController(CompatibleNode): """ def __init__(self): - super(VelocityController, self).__init__('velocity_controller') - self.loginfo('VelocityController node started') + super(VelocityController, self).__init__("velocity_controller") + self.loginfo("VelocityController node started") - self.control_loop_rate = self.get_param('control_loop_rate', 0.05) - self.role_name = self.get_param('role_name', 'ego_vehicle') + self.control_loop_rate = self.get_param("control_loop_rate", 0.05) + self.role_name = self.get_param("role_name", "ego_vehicle") self.target_velocity_sub: Subscriber = self.new_subscription( Float32, f"/paf/{self.role_name}/target_velocity", self.__get_target_velocity, - qos_profile=1) + qos_profile=1, + ) self.velocity_sub: Subscriber = self.new_subscription( CarlaSpeedometer, f"/carla/{self.role_name}/Speed", self.__get_current_velocity, - qos_profile=1) + qos_profile=1, + ) self.throttle_pub: Publisher = self.new_publisher( - Float32, - f"/paf/{self.role_name}/throttle", - qos_profile=1) + 
Float32, f"/paf/{self.role_name}/throttle", qos_profile=1 + ) self.brake_pub: Publisher = self.new_publisher( - Float32, - f"/paf/{self.role_name}/brake", - qos_profile=1) + Float32, f"/paf/{self.role_name}/brake", qos_profile=1 + ) self.reverse_pub: Publisher = self.new_publisher( - Bool, - f"/paf/{self.role_name}/reverse", - qos_profile=1) + Bool, f"/paf/{self.role_name}/reverse", qos_profile=1 + ) self.__current_velocity: float = None self.__target_velocity: float = None @@ -56,7 +55,7 @@ def run(self): Starts the main loop of the node :return: """ - self.loginfo('VelocityController node running') + self.loginfo("VelocityController node running") # PID for throttle pid_t = PID(0.60, 0.00076, 0.63) # since we use this for braking aswell, allow -1 to 0. @@ -71,15 +70,19 @@ def loop(timer_event=None): :return: """ if self.__target_velocity is None: - self.logdebug("VelocityController hasn't received target" - "_velocity yet. target_velocity has been set to" - "default value 0") + self.logdebug( + "VelocityController hasn't received target" + "_velocity yet. target_velocity has been set to" + "default value 0" + ) self.__target_velocity = 0 if self.__current_velocity is None: - self.logdebug("VelocityController hasn't received " - "current_velocity yet and can therefore not" - "publish a throttle value") + self.logdebug( + "VelocityController hasn't received " + "current_velocity yet and can therefore not" + "publish a throttle value" + ) return if self.__target_velocity < 0: @@ -135,7 +138,7 @@ def main(args=None): Main function starts the node :param args: """ - roscomp.init('velocity_controller', args=args) + roscomp.init("velocity_controller", args=args) try: node = VelocityController() @@ -146,5 +149,5 @@ def main(args=None): roscomp.shutdown() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/code/agent/setup.py b/code/agent/setup.py index ad7850df..0b6d7399 100644 --- a/code/agent/setup.py +++ b/code/agent/setup.py @@ -2,6 +2,5 @@ from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -setup_args = generate_distutils_setup(packages=['agent'], - package_dir={'': 'src'}) +setup_args = generate_distutils_setup(packages=["agent"], package_dir={"": "src"}) setup(**setup_args) diff --git a/code/agent/src/agent/agent.py b/code/agent/src/agent/agent.py index f3f7b4a0..a57f2d6a 100755 --- a/code/agent/src/agent/agent.py +++ b/code/agent/src/agent/agent.py @@ -4,88 +4,114 @@ def get_entry_point(): - return 'PAF22Agent' + return "PAFAgent" -class PAF22Agent(ROS1Agent): +class PAFAgent(ROS1Agent): def setup(self, path_to_conf_file): self.track = Track.MAP def get_ros_entrypoint(self): return { - 'package': 'agent', - 'launch_file': 'agent.launch', - 'parameters': { - 'role_name': 'hero', - } + "package": "agent", + "launch_file": "agent.launch", + "parameters": { + "role_name": "hero", + }, } def sensors(self): sensors = [ { - 'type': 'sensor.camera.rgb', - 'id': 'Center', - 'x': 0.0, 'y': 0.0, 'z': 1.70, - 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, - 'width': 1280, 'height': 720, 'fov': 100 - }, + "type": "sensor.camera.rgb", + "id": "Center", + "x": 0.0, + "y": 0.0, + "z": 1.70, + "roll": 0.0, + "pitch": 0.0, + "yaw": 0.0, + "width": 1280, + "height": 720, + "fov": 100, + }, { - 'type': 'sensor.camera.rgb', - 'id': 'Back', - 'x': 0.0, 'y': 0.0, 'z': 1.70, - 'roll': 0.0, 'pitch': 0.0, 'yaw': math.radians(180.0), - 'width': 1280, 'height': 720, 'fov': 100 - }, + "type": "sensor.camera.rgb", + "id": "Back", + "x": 0.0, + "y": 0.0, + 
"z": 1.70, + "roll": 0.0, + "pitch": 0.0, + "yaw": math.radians(180.0), + "width": 1280, + "height": 720, + "fov": 100, + }, { - 'type': 'sensor.camera.rgb', - 'id': 'Left', - 'x': 0.0, 'y': 0.0, 'z': 1.70, - 'roll': 0.0, 'pitch': 0.0, 'yaw': math.radians(-90.0), - 'width': 1280, 'height': 720, 'fov': 100 - }, + "type": "sensor.camera.rgb", + "id": "Left", + "x": 0.0, + "y": 0.0, + "z": 1.70, + "roll": 0.0, + "pitch": 0.0, + "yaw": math.radians(-90.0), + "width": 1280, + "height": 720, + "fov": 100, + }, { - 'type': 'sensor.camera.rgb', - 'id': 'Right', - 'x': 0.0, 'y': 0.0, 'z': 1.70, - 'roll': 0.0, 'pitch': 0.0, 'yaw': math.radians(90.0), - 'width': 1280, 'height': 720, 'fov': 100 - }, + "type": "sensor.camera.rgb", + "id": "Right", + "x": 0.0, + "y": 0.0, + "z": 1.70, + "roll": 0.0, + "pitch": 0.0, + "yaw": math.radians(90.0), + "width": 1280, + "height": 720, + "fov": 100, + }, { - 'type': 'sensor.lidar.ray_cast', - 'id': 'LIDAR', - 'x': 0.0, 'y': 0.0, 'z': 1.70, - 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0 - }, + "type": "sensor.lidar.ray_cast", + "id": "LIDAR", + "x": 0.0, + "y": 0.0, + "z": 1.70, + "roll": 0.0, + "pitch": 0.0, + "yaw": 0.0, + }, { - 'type': 'sensor.other.radar', - 'id': 'RADAR', - 'x': 2.0, 'y': 0.0, 'z': 0.7, - 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, - 'horizontal_fov': 30, 'vertical_fov': 30 - }, + "type": "sensor.other.radar", + "id": "RADAR", + "x": 2.0, + "y": 0.0, + "z": 0.7, + "roll": 0.0, + "pitch": 0.0, + "yaw": 0.0, + "horizontal_fov": 30, + "vertical_fov": 30, + }, + {"type": "sensor.other.gnss", "id": "GPS", "x": 0.0, "y": 0.0, "z": 0.0}, { - 'type': 'sensor.other.gnss', - 'id': 'GPS', - 'x': 0.0, 'y': 0.0, 'z': 0.0 - }, - { - 'type': 'sensor.other.imu', - 'id': 'IMU', - 'x': 0.0, 'y': 0.0, 'z': 0.0, - 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0 - }, - { - 'type': 'sensor.opendrive_map', - 'id': 'OpenDRIVE', - 'reading_frequency': 1 - }, - { - 'type': 'sensor.speedometer', - 'id': 'Speed' - } + "type": "sensor.other.imu", + "id": "IMU", + "x": 0.0, + "y": 0.0, + "z": 0.0, + "roll": 0.0, + "pitch": 0.0, + "yaw": 0.0, + }, + {"type": "sensor.opendrive_map", "id": "OpenDRIVE", "reading_frequency": 1}, + {"type": "sensor.speedometer", "id": "Speed"}, ] return sensors def destroy(self): - super(PAF22Agent, self).destroy() + super(PAFAgent, self).destroy() diff --git a/code/mock/setup.py b/code/mock/setup.py index 8f232a1f..bf698614 100755 --- a/code/mock/setup.py +++ b/code/mock/setup.py @@ -1,6 +1,5 @@ from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -setup_args = generate_distutils_setup(packages=['mock'], - package_dir={'': 'src'}) +setup_args = generate_distutils_setup(packages=["mock"], package_dir={"": "src"}) setup(**setup_args) diff --git a/code/mock/src/mock_intersection_clear.py b/code/mock/src/mock_intersection_clear.py index df7cea62..bb584941 100755 --- a/code/mock/src/mock_intersection_clear.py +++ b/code/mock/src/mock_intersection_clear.py @@ -10,18 +10,17 @@ class MockIntersectionClearPublisher(CompatibleNode): This node publishes intersection clear information. It can be used for testing. 
""" + def __init__(self): - super(MockIntersectionClearPublisher, self).\ - __init__('intersectionClearMock') + super(MockIntersectionClearPublisher, self).__init__("intersectionClearMock") - self.control_loop_rate = self.get_param('control_loop_rate', 10) - self.role_name = self.get_param('role_name', 'ego_vehicle') + self.control_loop_rate = self.get_param("control_loop_rate", 10) + self.role_name = self.get_param("role_name", "ego_vehicle") # self.enabled = self.get_param('enabled', False) self.stop_sign_pub: Publisher = self.new_publisher( - Bool, - f"/paf/{self.role_name}/intersection_clear", - qos_profile=1) + Bool, f"/paf/{self.role_name}/intersection_clear", qos_profile=1 + ) self.delta = 0.2 self.distance = 75.0 self.isClear = False @@ -33,7 +32,7 @@ def run(self): """ # if not self.enabled: # return - self.loginfo('Stopsignmock node running') + self.loginfo("Stopsignmock node running") def loop(timer_event=None): """ @@ -47,6 +46,7 @@ def loop(timer_event=None): if self.distance < 0.0: self.isClear = True self.stop_sign_pub.publish(msg) + self.new_timer(self.control_loop_rate, loop) self.spin() @@ -56,7 +56,7 @@ def main(args=None): Main function starts the node :param args: """ - roscomp.init('velocity_publisher_dummy', args=args) + roscomp.init("velocity_publisher_dummy", args=args) try: node = MockIntersectionClearPublisher() @@ -67,5 +67,5 @@ def main(args=None): roscomp.shutdown() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/code/mock/src/mock_stop_sign.py b/code/mock/src/mock_stop_sign.py index a5ec4691..f85f6344 100755 --- a/code/mock/src/mock_stop_sign.py +++ b/code/mock/src/mock_stop_sign.py @@ -2,6 +2,7 @@ import ros_compatibility as roscomp from ros_compatibility.node import CompatibleNode from rospy import Publisher + # from std_msgs.msg import Float32 from mock.msg import Stop_sign @@ -11,18 +12,17 @@ class MockStopSignPublisher(CompatibleNode): This node publishes stop sign light information. It can be used for testing. 
""" + def __init__(self): - super(MockStopSignPublisher, self).\ - __init__('stopSignMock') + super(MockStopSignPublisher, self).__init__("stopSignMock") - self.control_loop_rate = self.get_param('control_loop_rate', 10) - self.role_name = self.get_param('role_name', 'ego_vehicle') + self.control_loop_rate = self.get_param("control_loop_rate", 10) + self.role_name = self.get_param("role_name", "ego_vehicle") # self.enabled = self.get_param('enabled', False) self.stop_sign_pub: Publisher = self.new_publisher( - Stop_sign, - f"/paf/{self.role_name}/stop_sign", - qos_profile=1) + Stop_sign, f"/paf/{self.role_name}/stop_sign", qos_profile=1 + ) self.delta = 0.2 self.distance = 20.0 self.isStop = False @@ -34,7 +34,7 @@ def run(self): """ # if not self.enabled: # return - self.loginfo('Stopsignmock node running') + self.loginfo("Stopsignmock node running") def loop(timer_event=None): """ @@ -50,6 +50,7 @@ def loop(timer_event=None): self.distance = 20.0 msg.distance = self.distance self.stop_sign_pub.publish(msg) + self.new_timer(self.control_loop_rate, loop) self.spin() @@ -59,7 +60,7 @@ def main(args=None): Main function starts the node :param args: """ - roscomp.init('velocity_publisher_dummy', args=args) + roscomp.init("velocity_publisher_dummy", args=args) try: node = MockStopSignPublisher() @@ -70,5 +71,5 @@ def main(args=None): roscomp.shutdown() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/code/mock/src/mock_traffic_light.py b/code/mock/src/mock_traffic_light.py index 13852e03..63ff289a 100755 --- a/code/mock/src/mock_traffic_light.py +++ b/code/mock/src/mock_traffic_light.py @@ -2,6 +2,7 @@ import ros_compatibility as roscomp from ros_compatibility.node import CompatibleNode from rospy import Publisher + # from std_msgs.msg import Float32 from mock.msg import Traffic_light @@ -10,18 +11,17 @@ class MockTrafficLightPublisher(CompatibleNode): """ This node publishes traffic light information. It can be used for testing. 
""" + def __init__(self): - super(MockTrafficLightPublisher, self).\ - __init__('trafficLightMock') + super(MockTrafficLightPublisher, self).__init__("trafficLightMock") - self.control_loop_rate = self.get_param('control_loop_rate', 10) - self.role_name = self.get_param('role_name', 'ego_vehicle') + self.control_loop_rate = self.get_param("control_loop_rate", 10) + self.role_name = self.get_param("role_name", "ego_vehicle") # self.enabled = self.get_param('enabled', False) self.traffic_light_pub: Publisher = self.new_publisher( - Traffic_light, - f"/paf/{self.role_name}/traffic_light", - qos_profile=1) + Traffic_light, f"/paf/{self.role_name}/traffic_light", qos_profile=1 + ) self.delta = 0.2 self.distance = 20.0 self.color = "green" @@ -33,7 +33,7 @@ def run(self): """ # if not self.enabled: # return - self.loginfo('TrafficLightmock node running') + self.loginfo("TrafficLightmock node running") def loop(timer_event=None): """ @@ -54,6 +54,7 @@ def loop(timer_event=None): self.distance = 20.0 msg.distance = self.distance self.traffic_light_pub.publish(msg) + self.new_timer(self.control_loop_rate, loop) self.spin() @@ -63,7 +64,7 @@ def main(args=None): Main function starts the node :param args: """ - roscomp.init('traffic_light_publisher_dummy', args=args) + roscomp.init("traffic_light_publisher_dummy", args=args) try: node = MockTrafficLightPublisher() @@ -74,5 +75,5 @@ def main(args=None): roscomp.shutdown() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/code/perception/launch/perception.launch b/code/perception/launch/perception.launch index 45d970be..8d6072e7 100644 --- a/code/perception/launch/perception.launch +++ b/code/perception/launch/perception.launch @@ -41,8 +41,8 @@ -* [Title of wiki page](#title-of-wiki-page) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [Cheat Sheet](#cheat-sheet) - * [Basics](#basics) - * [Extended](#extended) - * [more Content](#more-content) - * [Sources](#sources) - - -## Cheat Sheet - -### Basics - ---- - -Headings: - -(# H1) - -(## H2) - -(### H3) - ---- -Bold **bold text** - ---- -Italic *italicized text* - ---- -Blockquote - -> blockquote ---- -Ordered List - -1. First item -2. Second item -3. Third item - ---- -Unordered List - -* First item -* Second item -* Third item - ---- -Code - -`code` - ---- - -Horizontal Rule - ---- - -Link -[title](https://www.example.com) - ---- -Image -![alt text](image.jpg) - -### Extended - ---- -Table -| Syntax | Description | -| ----------- | ----------- | -| Header | Title | -| Paragraph | Text | - ---- -Fenced Code Block - -```python -{ - "firstName": "John", - "lastName": "Smith", - "age": 25 -} -``` - ---- -Footnote - -Here's a sentence with a footnote. [^1] - -[^1]: This is the footnote. - ---- -Heading ID - -#### My Great Heading {#custom-id} - ---- -Definition List -term -: definition - ---- -Strikethrough - -~~The world is flat.~~ - ---- - -Task List - -* [x] Write the press release -* [ ] Update the website - -* [ ] Contact the media - ---- - -Subscript - -H~2~O - ---- - -Superscript - -X^2^ - ---- - -## more Content - -### Sources - - diff --git a/doc/02_development/templates/template_wiki_page_empty.md b/doc/02_development/templates/template_wiki_page_empty.md deleted file mode 100644 index bd0eb1ff..00000000 --- a/doc/02_development/templates/template_wiki_page_empty.md +++ /dev/null @@ -1,37 +0,0 @@ -# Title of wiki page - -**Summary:** This page functions a template for who to build knowledge articles for everyone to understand. 
The basic structure should be kept for all articles. This template further contains a cheat sheet with the most useful markdown syntax. - ---- - -## Author - -Josef Kircher - -## Date - -04.11.2022 - -## Prerequisite - -VSCode Extensions: - -* Markdown All in One by Yu Zhang (for TOC) - ---- - -* [Title of wiki page](#title-of-wiki-page) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [Some Content](#some-content) - * [more Content](#more-content) - * [Sources](#sources) - -## Some Content - -## more Content - -### Sources - - diff --git a/doc/03_research/01_acting/03_paf21_1_acting.md b/doc/03_research/01_acting/03_paf21_1_acting.md deleted file mode 100644 index 602205d9..00000000 --- a/doc/03_research/01_acting/03_paf21_1_acting.md +++ /dev/null @@ -1,35 +0,0 @@ -# Research: PAF21_1 Acting - -## Inputs - -* waypoints of the planned route -* general odometry of the vehicle - -## Curve Detection - -* Can detect curves on the planned trajectory -* Calculates the speed in which to drive the detected Curve -![Curve](../../00_assets/research_assets/curve_detection_paf21_1.png) - -## Speed Control - -* [CARLA Ackermann Control](https://carla.readthedocs.io/projects/ros-bridge/en/latest/carla_ackermann_control/) -* Speed is forwarded to the CARLA vehicle via Ackermann_message, which already includes a PID controller for safe driving/accelerating etc. -* no further controlling needed -> speed can be passed as calculated - -## Steering Control - -### Straight Trajectories - -* **Stanley Steering Controller** - * Calculates steering angle from offset and heading error - * includes PID controller - ![Stanley Controller](../../00_assets/research_assets/stanley_paf21_1.png) - -### Detected Curves - -* **Naive Steering Controller** (close to pure pursuit) - * uses Vehicle Position + Orientation + Waypoints - * Calculate direction to drive to as vector - * direction - orientation = Steering angle at each point in time - * speed is calculated in Curve Detection and taken as is diff --git a/doc/03_research/01_acting/05_autoware_acting.md b/doc/03_research/01_acting/05_autoware_acting.md deleted file mode 100644 index 8ba6b880..00000000 --- a/doc/03_research/01_acting/05_autoware_acting.md +++ /dev/null @@ -1,36 +0,0 @@ -# Research: [Autoware Acting](https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-architecture/control/#autoware-control-design) - -## Inputs - -* Odometry (position and orientation, from Localization module) -* Trajectory (output of Planning) -* Steering Status (current steering of vehicle, from Vehicle Interface) -* Actuation Status (acceleration, steering, brake actuations, from Vehicle Interface) -* (“vehicle signal commands” directly into Vehicle Interface -> Handbrake, Hazard Lights, Headlights, Horn, Stationary Locking, Turn Indicators, Wipers etc.) 
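Grouping the inputs listed above into one container makes the control interface concrete. A minimal sketch, assuming invented field names and types (these are not Autoware's actual message definitions):

```python
# Hypothetical grouping of the control inputs named above; field names and
# types are illustrative only, not Autoware's actual interfaces.
from dataclasses import dataclass, field
from typing import List, Tuple


@dataclass
class ControlInput:
    pose: Tuple[float, float, float]  # x, y, yaw from the Localization module
    trajectory: List[Tuple[float, float]] = field(default_factory=list)  # Planning
    steering_status: float = 0.0  # current steering angle, from Vehicle Interface
    actuation_status: Tuple[float, float, float] = (0.0, 0.0, 0.0)  # accel/steer/brake
```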
- -### General Component Architecture - -![Node diagram](https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-interfaces/components/images/Control-Bus-ODD-Architecture.drawio.svg) - -### With the Control Module - -![control-component](https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-architecture/control/image/control-component.drawio.svg) - -## [Trajectory Follower](https://autowarefoundation.github.io/autoware.universe/main/control/trajectory_follower_base/) - -* generates control command to follow reference trajectory from Planning -* computes lateral (steering) and longitudinal (velocity) controls separately -* lateral controller: mpc (model predictive) or pure pursuit -* longitudinal: “currently only” PID controller - -## Vehicle Command Gate - -* filters control commands to prevent abnormal values -* sends commands to [Vehicle Interface](https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-interfaces/components/vehicle-interface/) - -## Outputs - -* steering angle -* steering torque -* speed -* acceleration diff --git a/doc/03_research/01_acting/Readme.md b/doc/03_research/01_acting/Readme.md deleted file mode 100644 index b1e75e53..00000000 --- a/doc/03_research/01_acting/Readme.md +++ /dev/null @@ -1,11 +0,0 @@ -# Acting - -This folder contains all the results of our research on acting: - -* **PAF22** -* [Basics](./01_basics_acting.md) -* [Implementation](./02_implementation_acting.md) -* **PAF23** -* [PAF21_1 Acting](./03_paf21_1_acting.md) -* [PAF21_2 Acting & Pylot Control](./04_paf21_2_and_pylot_acting.md) -* [Autoware Control](./05_autoware_acting.md) diff --git a/doc/03_research/02_perception/Readme.md b/doc/03_research/02_perception/Readme.md deleted file mode 100644 index 364be7af..00000000 --- a/doc/03_research/02_perception/Readme.md +++ /dev/null @@ -1,12 +0,0 @@ -# Perception - -This folder contains all the results of research on perception: - -* **PAF22** - * [Basics](./02_basics.md) - * [First implementation plan](./03_first_implementation_plan.md) -* **PAF23** - * [Pylot Perception](./04_pylot.md) - * [PAF_21_2 Perception](./05_Research_PAF21-Perception.md) - * [PAF_21_1_Perception](./06_paf_21_1_perception.md) -* [Autoware Perception](./05-autoware-perception.md) diff --git a/doc/03_research/03_planning/00_paf22/04_decision_making.md b/doc/03_research/03_planning/00_paf22/04_decision_making.md deleted file mode 100644 index f28a01fe..00000000 --- a/doc/03_research/03_planning/00_paf22/04_decision_making.md +++ /dev/null @@ -1,257 +0,0 @@ -# Decision-making module - -**Summary:** This page gives a brief summary over possible decision-making choices their ad- and disadvantages as well as the opportunity to interchange them later on. Also, possible implementation options for those concepts are given. 
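The trade-offs below are easier to follow with the smallest possible example of the first option in mind. Here is a hand-rolled, table-driven finite state machine whose states and events are invented for illustration; it is not the project's actual behaviour set:

```python
# Minimal table-driven finite state machine; states and events are
# illustrative placeholders, not the project's real behaviour set.
class DrivingStateMachine:
    TRANSITIONS = {
        ("driving", "red_light_ahead"): "stopping",
        ("stopping", "light_green"): "driving",
        ("driving", "obstacle_ahead"): "lane_change",
        ("lane_change", "lane_clear"): "driving",
    }

    def __init__(self, initial: str = "driving"):
        self.state = initial

    def handle(self, event: str) -> str:
        # Unknown (state, event) pairs leave the current state unchanged.
        self.state = self.TRANSITIONS.get((self.state, event), self.state)
        return self.state


fsm = DrivingStateMachine()
assert fsm.handle("red_light_ahead") == "stopping"
assert fsm.handle("light_green") == "driving"
```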
- ---- - -## Author - -Josef Kircher - -## Date - -01.12.2022 - -## Prerequisite - ---- - -* [Decision-making module](#decision-making-module) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [Decision-making algorithms](#decision-making-algorithms) - * [Finite State machine](#finite-state-machine) - * [Markov Chain](#markov-chain) - * [Decision Tree](#decision-tree) - * [Previous approaches](#previous-approaches) - * [PAF21-1](#paf21-1) - * [PAF21-2](#paf21-2) - * [PSAF1 2020](#psaf1-2020) - * [PSAF2 2020](#psaf2-2020) - * [Python or ROS libraries for these decision-making algorithms](#python-or-ros-libraries-for-these-decision-making-algorithms) - * [State machines](#state-machines) - * [SMACH](#smach) - * [SMACC](#smacc) - * [Markov Chains](#markov-chains) - * [QuantEcon](#quantecon) - * [markov_decision_making](#markov_decision_making) - * [Decision trees](#decision-trees) - * [pytrees](#pytrees) - * [Conclusion](#conclusion) - * [Sources](#sources) - - -## Decision-making algorithms - -### Finite State machine - -A finite-state machine (FSM) or finite-state automaton (FSA, plural: automata), finite automaton, or simply a state machine, is a mathematical model of computation. -It is an abstract machine that can be in exactly one of a finite number of states at any given time. -The FSM can change from one state to another in response to some inputs; the change from one state to another is called a transition. -An FSM is defined by a list of its states, its initial state, and the inputs that trigger each transition. -Finite-state machines are of two types—deterministic finite-state machines and non-deterministic finite-state machines. A deterministic finite-state machine can be constructed equivalent to any non-deterministic one. - -#### Advantages - -* easy to implement -* we know most of the scenarios (finite state space) -* previous groups have solutions we could adapt/extend - -#### Disadvantages - -* many states necessary -* even though we can try to map all possible states, there still might be some situation we could not account for - -### Markov Chain - -A Markov chain or Markov process is a stochastic model describing a sequence of possible events in which the probability of each event depends only on the state attained in the previous event. -A countably infinite sequence, in which the chain moves state at discrete time steps, gives a discrete-time Markov chain. A continuous-time process is called a continuous-time Markov chain. It is named after the Russian mathematician Andrey Markov. - -#### Advantages - -* possible to build Markov Chain from State machine -* experience from previous projects -* only depends on current state ("memorylessness") - -#### Disadvantages - -* might be complicated to implement -* probabilities for transitions might need to be guessed, empirically estimated - -### Decision Tree - -A decision tree is a decision support tool that uses a tree-like model of decisions and their possible consequences, including chance event outcomes, resource costs, and utility. -It is one way to display an algorithm that only contains conditional control statements. Decision trees are commonly used in operations research, specifically in decision analysis, to help identify a strategy most likely to reach a goal, but are also a popular tool in machine learning. - -#### Advantages - -* easy implementation -* tree like structure usable in Machine Learning (Random Forest e.g.) 
- -#### Disadvantages - -* multiple decision trees necessary -* prediction independent of previous state - -## Previous approaches - -### PAF21-1 - -#### State machine - -* 2 state machines: one for maneuvers, one for speed control -* Speed control more complex, when to brake seems like the most challenging task - -#### Take away - -* Some states seem to be comparable to what we are required to accomplish by the leaderboard -* Our task might be more complex, needs additional states and transitions -* I'm uncertain about an extra speed state, might be easier to handle that more locally by the local planner, maybe in combination with an observer element that keeps track of the surrounding by processing the information from `Perception` - -### PAF21-2 - -#### No clear concept - -* some sort of state machine integrated in local planner -* obstacle planner for dynamic obstacles (pedestrians, cars, bicycles) -* useful parameters which we could adapt -* path prediction for obstacles -* obstacles are only interesting if they cross the path of the ego vehicle - -#### Take away - -* Obstacle planner might be useful for dynamic obstacle detection if not handled elsewhere -* path prediction might reduce the number objects tracked that we could interfere with -* Also, if we adapt our local plan this path prediction of other vehicles might come in handy -* On the other hand, overhead to keep track of vehicles and maybe repredict paths if some vehicles change direction - -### PSAF1 2020 - -#### State machine - -* Three driving functions: Driving, stopping at traffic light, stopping at stop sign -* First project iteration so state machine more simple -* still covers many important scenarios - -#### Take away - -* Good starting point to have a minimal viable state machine -* Need adaption depending on what information we are getting forwarded/process in the planning module - -### PSAF2 2020 - -#### Decision tree - -* This team used a decision tree to cover the major driving scenarios -* Within the scenarios the actions are more linear -* Reminds me of the execution of a state where driving scenarios are the states and the execution the things our local planner should do within that state - -#### Take Away - -* Even though the approach is different, the execution might be similar to the other team algorithms -* We might not be interested in a decision tree as we want to keep the option to switch to a Markov chain, which would be more overhead if we start with a decision tree - -## Python or ROS libraries for these decision-making algorithms - -### State machines - -#### SMACH - -* Task-level architecture for creating state machines for robot behaviour. 
-* Based on Python -* Fast prototyping: Quickly create state machines -* Complex state machines can easily be created -* Introspection: smach_viewer provides a visual aid to follow the state machine executing its tasks - * smach_viewer is unmaintained and does not work with noetic -* Allows nested state machines -* Values can be passed between states -* Tutorials and documentation seems to be easy to understand so creating a first state machine shouldn't be too hard -* working with several ROS topics and messages within the state machine needs to be evaluated: - * the execution of states is mostly planned to happen in the local planner so for just sending a ROS message, SMACH might be efficient - -Not use SMACH for: - -* Unstructured tasks: SMACH is not efficient in sheduling unstructured tasks -* Low-level systems: SMACH is not build for high efficiency, might fall short for emergency maneuvers - -* Simple examples run without problem - -#### SMACC - -* event-driven, asynchronous, behavioral state machine library -* real-time ROS applications -* written in C++ -* designed to allow programmers to build robot control applications for multicomponent robots, in an intuitive and systematic manner. -* well maintained, lots of prebuild state machines to possibly start from - -Why not use SMACC: - -* might get some time to get back into C++ -* more sophisticated library might need more time to get used to -* awful country music in the back of tutorial videos - -* Tutorials do not run without further debugging which I didn't invest the time to do so - -### Markov Chains - -#### QuantEcon - -* a economics library for implementing Markov chains -* more focussed on simulation than actually using it in an AD agent -* maybe usable for testing and simulating a Markov chain before implementing it - -#### markov_decision_making - -* ROS library for robot decision-making based on Markov Decision Problems -* written in C++ -* callback-based action interpretation allows to use other frameworks (SMACH) -* relatively easy to implement hierarchical MDPs -* supports synchronous and asynchronous execution - -Why not use markov_decision_making: - -* not maintained -* only works with ROS hydro - -### Decision trees - -#### pytrees - -* easy framework for implementing behaviour trees -* written in python -* used by a group two years ago -* not usable for real-time application code according to their docs -* priority handling - higher level interrupts are handled first - -## Conclusion - -In my opinion, a state machine would be a great start for the project. There are plenty of resources available from recent projects. -It needs to be further discussed if the libraries presented above possess the needed functionality to run our state machine. The planning team might meet on the issue and present a suitable solution. -It is possible to start with a skeleton of both and compare them. - -### Sources - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/03_research/03_planning/Readme.md b/doc/03_research/03_planning/Readme.md deleted file mode 100644 index e48c2530..00000000 --- a/doc/03_research/03_planning/Readme.md +++ /dev/null @@ -1,7 +0,0 @@ -# Planning - -This folder contains all the results of research on planning from PAF 23 and 22. -The research documents from the previous project were kept as they contain helpful information. 
The documents are separated in different folders: - -* **[PAF22](./00_paf22/)** -* **[PAF23](./00_paf23/)** diff --git a/doc/03_research/04_requirements/03_requirements.md b/doc/03_research/04_requirements/03_requirements.md deleted file mode 100644 index 9f8755ab..00000000 --- a/doc/03_research/04_requirements/03_requirements.md +++ /dev/null @@ -1,82 +0,0 @@ -# Requirements - -**Summary:** This page contains the requirements obtained from the Carla Leaderboard website as well as former projects in the `Praktikum Autonomes Fahren` - ---- - -## Author - -Josef Kircher, Simon Erlbacher - -## Date - -17.11.2022 - -## Prerequisite - ---- - -* [Requirements](#requirements) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [Requirements from Leaderboard tasks](#requirements-from-leaderboard-tasks) - * [Carla Leaderboard Score](#carla-leaderboard-score) - * [Prioritized driving aspects](#prioritized-driving-aspects) - * [more Content](#more-content) - * [Sources](#sources) - - -## Requirements from Leaderboard tasks - -* follow waypoints on a route -* don't deviate from route by more than 30 meters -* act in accordance with traffic rules -* don't get blocked -* complete 10 routes (2 weather conditions) - ---- - -## Prioritized driving aspects - -There are different ways to prioritize the driving aspects mentioned in the document [08_use_cases](https://github.com/ll7/paf22/blob/482c1f5a201b52276d7b77cf402009bd99c93317/doc/03_research/08_use_cases.md). -The most important topics, in relation to this project, are the driving score and the safety aspect. -Also, it is appropriate to implement the basic features of an autonomous car first. The list is a mixture of the different approaches. Prioritizing from very important functionalities to less important features. - -`Very important:` - -* Recognize the street limitations -* Recognize pedestrians -* Follow the waypoints -* Recognize traffic lights -* Recognize obstacles -* Recognize cars in front of the agent (keep distance) -* Steering, accelerate, decelerate -* Street rules (no street signs available) -* Change lane (obstacles) - -`Important:` - -* Check Intersection -* Sense traffic (speed and trajectory) -* Predict traffic -* Emergency brake -* Sense length of ramp -* Recognize space (Turn into highway) -* Change lane (safe) -* Recognize emergency vehicle -* Recognize unexpected dynamic situations (opening door, bycicles,...) - -`Less important:` - -* Smooth driving (accelerate, decelerate, stop) -* Weather Condition -* Predict pedestrians - ---- - -## more Content - -### Sources - - diff --git a/doc/03_research/04_requirements/Readme.md b/doc/03_research/04_requirements/Readme.md deleted file mode 100644 index e45c90be..00000000 --- a/doc/03_research/04_requirements/Readme.md +++ /dev/null @@ -1,7 +0,0 @@ -# Requirements - -This folder contains all the results of our research on requirements: - -* [Leaderboard information](./02_informations_from_leaderboard.md) -* [Reqirements for agent](./03_requirements.md) -* [Use case scenarios](./04_use_cases.md) diff --git a/doc/03_research/Readme.md b/doc/03_research/Readme.md deleted file mode 100644 index f4948302..00000000 --- a/doc/03_research/Readme.md +++ /dev/null @@ -1,10 +0,0 @@ -# Research - -This folder contains every research we did before we started the project. 
- -The research is structured in the following folders: - -* [Acting](./01_acting/Readme.md) -* [Perception](./02_perception/Readme.md) -* [Planning](./03_planning/Readme.md) -* [Requirements](./04_requirements/Readme.md) diff --git a/doc/05_acting/Readme.md b/doc/05_acting/Readme.md deleted file mode 100644 index d84fdf21..00000000 --- a/doc/05_acting/Readme.md +++ /dev/null @@ -1,10 +0,0 @@ -# Documentation of acting component - -This folder contains the documentation of the acting component. - -1. [Architecture](./01_architecture_documentation.md) -2. [Overview of the Velocity Controller](./02_velocity_controller.md) -3. [Overview of the Steering Controllers](./03_steering_controllers.md) -4. [Overview of the Vehicle Controller Component](./04_vehicle_controller.md) -5. [How to test/tune acting components independedly](./05_acting_testing.md) -6. [Main frame publisher](./06_mainframe_publisher.md) diff --git a/doc/06_perception/Readme.md b/doc/06_perception/Readme.md deleted file mode 100644 index 56f169a4..00000000 --- a/doc/06_perception/Readme.md +++ /dev/null @@ -1,22 +0,0 @@ -# Documentation of perception component - -This folder contains further documentation of the perception components. - -1. [Vision Node](./06_vision_node.md) - - The Visison Node provides an adaptive interface that is able to perform object-detection and/or image-segmentation on multiple cameras at the same time. -2. [Position Heading Filter Debug Node](./07_position_heading_filter_debug_node.md) -3. [Kalman Filter](./08_kalman_filter.md) -4. [Position Heading Publisher Node](./09_position_heading_publisher_node.md) -5. [Distance to Objects](./10_distance_to_objects.md) -6. [Traffic Light Detection](./11_traffic_light_detection.md) -7. [Coordinate Transformation (helper functions)](./00_coordinate_transformation.md) -8. [Dataset Generator](./01_dataset_generator.md) -9. [Dataset Structure](./02_dataset_structure.md) -10. [Lidar Distance Utility](./03_lidar_distance_utility.md) - 1. not used since paf22 -11. [Efficient PS](./04_efficientps.md) - 1. not used scince paf22 and never successfully tested - -## Experiments - -- The overview of performance evaluations is located in the [experiments](./experiments/README.md) folder. diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/globals.py b/doc/06_perception/experiments/object-detection-model_evaluation/globals.py deleted file mode 100644 index df8a9d5c..00000000 --- a/doc/06_perception/experiments/object-detection-model_evaluation/globals.py +++ /dev/null @@ -1,12 +0,0 @@ -IMAGE_BASE_FOLDER = '/home/maxi/paf23/code/output/12-dev/rgb/center' - -IMAGES_FOR_TEST = { - 'start': '1600.png', - 'intersection': '1619.png', - 'traffic_light': '1626.png', - 'traffic': '1660.png', - 'bicycle_far': '1663.png', - 'bicycle_close': '1668.png', - 'construction_sign_far': '2658.png', - 'construction_sign_close': '2769.png' -} diff --git a/doc/README.md b/doc/README.md new file mode 100644 index 00000000..7546ccb8 --- /dev/null +++ b/doc/README.md @@ -0,0 +1,50 @@ +# PAF Documentation + +This document provides an overview of the structure of the documentation. 
+ +- [PAF Documentation](#paf-documentation) + - [`general`](#general) + - [`development`](#development) + - [`research`](#research) + - [`examples`](#examples) + - [`perception`](#perception) + - [`planning`](#planning) + - [`acting`](#acting) + - [`assets`](#assets) + - [`dev_talks`](#dev_talks) + +## `general` + +The [`general`](./general/) folder contains installation instructions for the project and an overview of the system architecture. + +## `development` + +The [`development`](./development/) folder contains guidelines for developing inside the project. It also provides templates for documentation files and Python classes. Further information can be found in the [README](development/README.md). + +## `research` + +The [`research`](./research/) folder contains the findings of each group during the initial phase of the project. + +## `examples` + +To-do + +## `perception` + +The [`perception`](./perception/) folder contains documentation for the whole perception module and its individual components. + +## `planning` + +The [`planning`](./planning/) folder contains documentation for the whole planning module and its individual components. + +## `acting` + +The [`acting`](./acting/) folder contains documentation for the whole acting module and its individual components. + +## `assets` + +The [`assets`](./assets/) folder contains mainly images that are used inside the documentation. + +## `dev_talks` + +The [`dev_talks`](./dev_talks/) folder contains the protocols of each sprint review and the roles that the students fill during the project. diff --git a/doc/acting/README.md b/doc/acting/README.md new file mode 100644 index 00000000..f55e66eb --- /dev/null +++ b/doc/acting/README.md @@ -0,0 +1,10 @@ +# Documentation of acting component + +This folder contains the documentation of the acting component. + +1. [Architecture](./architecture_documentation.md) +2. [Overview of the Velocity Controller](./velocity_controller.md) +3. [Overview of the Steering Controllers](./steering_controllers.md) +4. [Overview of the Vehicle Controller Component](./vehicle_controller.md) +5. [How to test/tune acting components independently](./acting_testing.md) +6. [Main frame publisher](./main_frame_publisher.md) diff --git a/doc/05_acting/05_acting_testing.md b/doc/acting/acting_testing.md similarity index 96% rename from doc/05_acting/05_acting_testing.md rename to doc/acting/acting_testing.md index fafc00c3..a5a67791 100644 --- a/doc/05_acting/05_acting_testing.md +++ b/doc/acting/acting_testing.md @@ -2,24 +2,10 @@ **Summary:** This page shows ways to test and tune acting components and to verify that they work as intended. ---- - -## Author - -Alexander Hellmann - -## Date - -01.04.2024 - - - [How to test/tune acting components independently](#how-to-testtune-acting-components-independently) - - [Author](#author) - - [Date](#date) - [Acting\_Debug\_Node](#acting_debug_node) - [Setup for Testing with the Debug-Node](#setup-for-testing-with-the-debug-node) - [Operating the Debug-Node](#operating-the-debug-node) - ## Acting_Debug_Node diff --git a/doc/05_acting/01_architecture_documentation.md b/doc/acting/architecture_documentation.md similarity index 95% rename from doc/05_acting/01_architecture_documentation.md rename to doc/acting/architecture_documentation.md index 33031b0a..5c1aeb9d 100644 --- a/doc/05_acting/01_architecture_documentation.md +++ b/doc/acting/architecture_documentation.md @@ -2,18 +2,7 @@ **Summary**: This documentation shows the current Acting Architecture.
-## Authors - -Alexander Hellmann - -## Date - -01.04.2024 - - - [Architecture](#architecture) - - [Authors](#authors) - - [Date](#date) - [Acting Architecture](#acting-architecture) - [Summary of Acting Components](#summary-of-acting-components) - [pure\_pursuit\_controller.py](#pure_pursuit_controllerpy) @@ -22,11 +11,10 @@ Alexander Hellmann - [vehicle\_controller.py](#vehicle_controllerpy) - [helper\_functions.py](#helper_functionspy) - [MainFramePublisher.py](#mainframepublisherpy) - ## Acting Architecture -![MISSING: Acting-ARCHITECTURE](../00_assets/acting/Architecture_Acting.png) +![MISSING: Acting-ARCHITECTURE](../assets/acting/Architecture_Acting.png) ## Summary of Acting Components diff --git a/doc/05_acting/06_main_frame_publisher.md b/doc/acting/main_frame_publisher.md similarity index 85% rename from doc/05_acting/06_main_frame_publisher.md rename to doc/acting/main_frame_publisher.md index 5999eda3..706e9c2e 100644 --- a/doc/05_acting/06_main_frame_publisher.md +++ b/doc/acting/main_frame_publisher.md @@ -2,24 +2,10 @@ **Summary:** This page informs about the main frame publisher. ---- - -## Author - -Julian Graf - -## Date - -29.03.2023 - - - [Main frame publisher](#main-frame-publisher) - - [Author](#author) - - [Date](#date) - [Overview: Main frame publisher](#overview-main-frame-publisher) - [How to use](#how-to-use) - [Known issues](#known-issues) - ## Overview: Main frame publisher @@ -38,4 +24,4 @@ There are issues if the vehicle drives upwards or downwards. In this case the path will start to rise above the street (see picture) or start to move below the street. You can counteract this by changing the z offset of the path in rviz. -![main frame publisher bug](./../00_assets/acting/main_frame_publisher_bug.png) +![main frame publisher bug](./../assets/acting/main_frame_publisher_bug.png) diff --git a/doc/05_acting/03_steering_controllers.md b/doc/acting/steering_controllers.md similarity index 90% rename from doc/05_acting/03_steering_controllers.md rename to doc/acting/steering_controllers.md index 46101b9e..e7c0a474 100644 --- a/doc/05_acting/03_steering_controllers.md +++ b/doc/acting/steering_controllers.md @@ -2,24 +2,10 @@ **Summary:** This page provides an overview of the current status of both steering controllers, the PurePursuit and the Stanley Controller. ---- - -## Author - -Alexander Hellmann - -## Date - -01.04.2024 - - - [Overview of the Steering Controllers](#overview-of-the-steering-controllers) - - [Author](#author) - - [Date](#date) - [General Introduction to Steering Controllers](#general-introduction-to-steering-controllers) - [PurePursuit Controller](#purepursuit-controller) - [Stanley Controller](#stanley-controller) - ## General Introduction to Steering Controllers @@ -35,7 +21,7 @@ For more in-depth information about the PurePursuit Controller, click [this link] At every moment it checks a point of the trajectory in front of the vehicle with a distance of **$d_{la}$** and determines a steering-angle so that the vehicle will aim straight at this point of the trajectory. -![MISSING: PurePursuit-ShowImage](../00_assets/acting/Steering_PurePursuit.png) +![MISSING: PurePursuit-ShowImage](../assets/acting/Steering_PurePursuit.png) This **look-ahead-distance $d_{la}$** is velocity-dependent, as at higher velocities, the controller should look further ahead onto the trajectory.
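To make the look-ahead idea concrete before the steering formula given below, here is a minimal, hedged sketch of a PurePursuit step in Python. The parameter names and defaults (`k_ld`, `min_lookahead`, `wheelbase`) are assumptions for illustration only, not the tuned values or actual code of the project's PurePursuit implementation:

```python
import math


def pure_pursuit_steering(alpha: float, velocity: float, wheelbase: float,
                          k_ld: float = 2.0, min_lookahead: float = 3.0) -> float:
    """Illustrative PurePursuit step (sketch only, not the project's implementation).

    alpha: angle between the vehicle heading and the look-ahead point [rad]
    velocity: current vehicle speed [m/s]
    wheelbase: axle distance L_vehicle [m]
    """
    # Velocity-dependent look-ahead distance d_la: look further ahead at higher speeds.
    d_la = max(min_lookahead, k_ld * velocity)
    # Steering angle from the PurePursuit geometry (see the formula below).
    return math.atan(2.0 * wheelbase * math.sin(alpha) / d_la)
```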
@@ -46,7 +32,7 @@ $$ \delta = arctan({2 \cdot L_{vehicle} \cdot sin(\alpha) \over d_{la}})$$ To tune the PurePursuit Controller, you can tune the factor of this velocity-dependence **$k_{ld}$**. Also, for an unknown reason, we needed to add an amplification **$k_{pub}$** to the output-steering signal before publishing as well, which greatly improved the steering performance in the dev-launch: -![MISSING: PurePursuit-Optimization_Image](../00_assets/acting/Steering_PurePursuit_Tuning.png) +![MISSING: PurePursuit-Optimization_Image](../assets/acting/Steering_PurePursuit_Tuning.png) **NOTE:** The **look-ahead-distance $d_{la}$** should already be well tuned for optimal sensor data and for the dev-launch! In the Leaderboard-Launch this sadly does not work the same, so it requires different tuning and needs to be optimized/fixed. @@ -56,7 +42,7 @@ In the Leaderboard-Launch this sadly does not work the same, so it requires diff The [Stanley Controller's](../../code/acting/src/acting/stanley.py) main features to determine a steering-output are the so-called **cross-track-error** (e_fa in Image) and the **trajectory-heading** (theta_e in Image). For more in-depth information about the Stanley Controller, click [this link](https://medium.com/roboquest/understanding-geometric-path-tracking-algorithms-stanley-controller-25da17bcc219) and [this link](https://ai.stanford.edu/~gabeh/papers/hoffmann_stanley_control07.pdf). -![MISSING: Stanley-SHOW-IMAGE](../00_assets/acting/Steering_Stanley.png) +![MISSING: Stanley-SHOW-IMAGE](../assets/acting/Steering_Stanley.png) At every moment it checks the closest point of the trajectory to itself and determines two steering-angles: @@ -69,7 +55,7 @@ $$ \delta = \theta_e - arctan({k_{ce} \cdot e_{fa} \over v})$$ To tune the Stanley Controller, you tune the factor **$k_{ce}$**, which amplifies (or diminishes) how strongly the **cross-track-error**-calculated steering-angle will "flow" into the output steering-angle. -![MISSING: Stanley-Compared to PurePursuit](../00_assets/acting/Steering_Stanley_ComparedToPurePur.png) +![MISSING: Stanley-Compared to PurePursuit](../assets/acting/Steering_Stanley_ComparedToPurePur.png) As with the PurePursuit Controller, the good tuning achieved in the Dev-Launch was sadly far too strong for the Leaderboard-Launch, which is why we needed to hotfix the steering in the last week and tune Stanley a lot "weaker". We do not know exactly why the two launches are this different. (Dev-Launch and Leaderboard-Launch differ in synchronicity: the Dev-Launch is synchronous, the Leaderboard-Launch asynchronous?) diff --git a/doc/05_acting/04_vehicle_controller.md b/doc/acting/vehicle_controller.md similarity index 92% rename from doc/05_acting/04_vehicle_controller.md rename to doc/acting/vehicle_controller.md index 1bb45558..62dfefe7 100644 --- a/doc/05_acting/04_vehicle_controller.md +++ b/doc/acting/vehicle_controller.md @@ -2,25 +2,11 @@ **Summary:** This page provides an overview of the current status of the Vehicle Controller Component.
---- - -## Authors - -Robert Fischer, Alexander Hellmann - -## Date - -01.04.2024 - - - [Overview of the Vehicle Controller Component](#overview-of-the-vehicle-controller-component) - - [Authors](#authors) - - [Date](#date) - [General Introduction to the Vehicle Controller Component](#general-introduction-to-the-vehicle-controller-component) - [Vehicle Controller Output](#vehicle-controller-output) - [Emergency Brake](#emergency-brake) - [Unstuck Routine](#unstuck-routine) - ## General Introduction to the Vehicle Controller Component @@ -64,7 +50,7 @@ This is done to prevent firing the emergency brake each time the main loop is re Comparison between normal braking and emergency braking: -![Braking Comparison](/doc/00_assets/acting/emergency_brake_stats_graph.png) +![Braking Comparison](/doc/assets/acting/emergency_brake_stats_graph.png) _Please be aware that this bug abuse might not work in newer updates!_ @@ -72,7 +58,7 @@ _Please be aware, that this bug abuse might not work in newer updates!_ The Vehicle Controller also reads ```current_behavior```-Messages, published by Planning, currently reacting to the **unstuck-behavior**: -This is done to drive in a specific way whenever we get into a stuck situation and the [Unstuck Behavior](/doc/07_planning/Behavior_detailed.md) is persued. +This is done to drive in a specific way whenever we get into a stuck situation and the [Unstuck Behavior](/doc/planning/Behavior_detailed.md) is pursued. Inside the Unstuck Behavior we want to drive backwards without steering, which is why it is the only case where we do not use any of our steering controllers. diff --git a/doc/05_acting/02_velocity_controller.md b/doc/acting/velocity_controller.md similarity index 87% rename from doc/05_acting/02_velocity_controller.md rename to doc/acting/velocity_controller.md index 645cae21..b4715997 100644 --- a/doc/05_acting/02_velocity_controller.md +++ b/doc/acting/velocity_controller.md @@ -2,23 +2,9 @@ **Summary:** This page provides an overview of the current status of the velocity_controller. ---- - -## Author - -Alexander Hellmann - -## Date - -01.04.2024 - - - [Overview of the Velocity Controller](#overview-of-the-velocity-controller) - - [Author](#author) - - [Date](#date) - [General Introduction to Velocity Controller](#general-introduction-to-velocity-controller) - [Current Implementation](#current-implementation) - ## General Introduction to Velocity Controller @@ -31,18 +17,18 @@ For more information about PID-Controllers and how they work, follow [this link] Currently, we use a PID-Controller tuned for a speed of 14 m/s (around 50 km/h), as this is the most commonly driven velocity in this simulation: -![MISSING: PID-TUNING-IMAGE](../00_assets/acting/VelContr_PID_StepResponse.png) +![MISSING: PID-TUNING-IMAGE](../assets/acting/VelContr_PID_StepResponse.png) Be aware that the CARLA-Vehicle shifts gears automatically, resulting in the bumps you see!
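To illustrate the structure of such a velocity PID step, here is a minimal, self-contained sketch in textbook form. The gains and the clamping of the output into `throttle`/`brake` are illustrative assumptions, not the project's tuned values or exact logic:

```python
class PIDController:
    """Textbook PID controller (illustrative sketch; gains are NOT the project's tuned values)."""

    def __init__(self, kp: float, ki: float, kd: float):
        self.kp, self.ki, self.kd = kp, ki, kd
        self._integral = 0.0
        self._last_error = None

    def step(self, target_velocity: float, current_velocity: float, dt: float):
        error = target_velocity - current_velocity
        self._integral += error * dt
        if self._last_error is None or dt <= 0.0:
            derivative = 0.0
        else:
            derivative = (error - self._last_error) / dt
        self._last_error = error
        u = self.kp * error + self.ki * self._integral + self.kd * derivative
        # A positive output drives the throttle; a negative output is reused for braking,
        # mirroring the throttle-optimized brake reuse described further below.
        throttle = min(max(u, 0.0), 1.0)
        brake = min(max(-u, 0.0), 1.0)
        return throttle, brake
```

Such a controller would be stepped once per control cycle with the elapsed time `dt` since the last cycle.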
As PID-Controllers are linear by nature, the velocity-system is linearized around 50 km/h, meaning the further you deviate from 50 km/h, the worse the controller's performance gets: -![MISSING: PID-LINEARIZATION-IMAGE](../00_assets/acting/VelContr_PID_differentVelocities.png) +![MISSING: PID-LINEARIZATION-IMAGE](../assets/acting/VelContr_PID_differentVelocities.png) As the Velocity Controller also has to handle braking, we currently use the ```throttle```-optimized PID-Controller to calculate ```brake``` as well (since adding another controller, like a P-Controller, did not work nearly as well!): -![MISSING: PID-BRAKING-IMAGE](../00_assets/acting/VelContr_PID_BrakingWithThrottlePID.png) +![MISSING: PID-BRAKING-IMAGE](../assets/acting/VelContr_PID_BrakingWithThrottlePID.png) -Currently, there is no general backwards-driving implemented here, as this was not needed (other than the [Unstuck Routine](/doc/07_planning/Behavior_detailed.md)). +Currently, there is no general backwards-driving implemented here, as this was not needed (other than the [Unstuck Routine](/doc/planning/Behavior_detailed.md)). Negative ```target_velocity``` signals are currently taken care of by braking until we stand still. The ONLY exception is a ```target_velocity``` of **-3**! diff --git a/doc/00_assets/2_15_layover.png b/doc/assets/2_15_layover.png similarity index 100% rename from doc/00_assets/2_15_layover.png rename to doc/assets/2_15_layover.png diff --git a/doc/00_assets/2_layover.png b/doc/assets/2_layover.png similarity index 100% rename from doc/00_assets/2_layover.png rename to doc/assets/2_layover.png diff --git a/doc/00_assets/3_layover.png b/doc/assets/3_layover.png similarity index 100% rename from doc/00_assets/3_layover.png rename to doc/assets/3_layover.png diff --git a/doc/00_assets/3d_2d_formula.png b/doc/assets/3d_2d_formula.png similarity index 100% rename from doc/00_assets/3d_2d_formula.png rename to doc/assets/3d_2d_formula.png diff --git a/doc/00_assets/3d_2d_projection.png b/doc/assets/3d_2d_projection.png similarity index 100% rename from doc/00_assets/3d_2d_projection.png rename to doc/assets/3d_2d_projection.png diff --git a/doc/00_assets/4_layover.png b/doc/assets/4_layover.png similarity index 100% rename from doc/00_assets/4_layover.png rename to doc/assets/4_layover.png diff --git a/doc/00_assets/Back_Detection.png b/doc/assets/Back_Detection.png similarity index 100% rename from doc/00_assets/Back_Detection.png rename to doc/assets/Back_Detection.png diff --git a/doc/00_assets/Comment_PR.png b/doc/assets/Comment_PR.png similarity index 100% rename from doc/00_assets/Comment_PR.png rename to doc/assets/Comment_PR.png diff --git a/doc/00_assets/Comment_viewed.png b/doc/assets/Comment_viewed.png similarity index 100% rename from doc/00_assets/Comment_viewed.png rename to doc/assets/Comment_viewed.png diff --git a/doc/00_assets/Commit_suggestion.png b/doc/assets/Commit_suggestion.png similarity index 100% rename from doc/00_assets/Commit_suggestion.png rename to doc/assets/Commit_suggestion.png diff --git a/doc/00_assets/Driving_SM.png b/doc/assets/Driving_SM.png similarity index 100% rename from doc/00_assets/Driving_SM.png rename to doc/assets/Driving_SM.png diff --git a/doc/00_assets/Files_Changed.png b/doc/assets/Files_Changed.png similarity index 100% rename from doc/00_assets/Files_Changed.png rename to doc/assets/Files_Changed.png diff --git a/doc/00_assets/Front_Detection.png b/doc/assets/Front_Detection.png similarity index 100% rename from
doc/00_assets/Front_Detection.png rename to doc/assets/Front_Detection.png diff --git a/doc/00_assets/Global_Plan.png b/doc/assets/Global_Plan.png similarity index 100% rename from doc/00_assets/Global_Plan.png rename to doc/assets/Global_Plan.png diff --git a/doc/00_assets/Intersection_SM.png b/doc/assets/Intersection_SM.png similarity index 100% rename from doc/00_assets/Intersection_SM.png rename to doc/assets/Intersection_SM.png diff --git a/doc/00_assets/Lane_Change_SM.png b/doc/assets/Lane_Change_SM.png similarity index 100% rename from doc/00_assets/Lane_Change_SM.png rename to doc/assets/Lane_Change_SM.png diff --git a/doc/00_assets/Lanelets.png b/doc/assets/Lanelets.png similarity index 100% rename from doc/00_assets/Lanelets.png rename to doc/assets/Lanelets.png diff --git a/doc/00_assets/Left_Detection.png b/doc/assets/Left_Detection.png similarity index 100% rename from doc/00_assets/Left_Detection.png rename to doc/assets/Left_Detection.png diff --git a/doc/00_assets/PR_overview.png b/doc/assets/PR_overview.png similarity index 100% rename from doc/00_assets/PR_overview.png rename to doc/assets/PR_overview.png diff --git a/doc/00_assets/Planning_Implementierung.png b/doc/assets/Planning_Implementierung.png similarity index 100% rename from doc/00_assets/Planning_Implementierung.png rename to doc/assets/Planning_Implementierung.png diff --git a/doc/00_assets/Pycharm_PR.png b/doc/assets/Pycharm_PR.png similarity index 100% rename from doc/00_assets/Pycharm_PR.png rename to doc/assets/Pycharm_PR.png diff --git a/doc/00_assets/Resolve_conversation.png b/doc/assets/Resolve_conversation.png similarity index 100% rename from doc/00_assets/Resolve_conversation.png rename to doc/assets/Resolve_conversation.png diff --git a/doc/00_assets/Review_changes.png b/doc/assets/Review_changes.png similarity index 100% rename from doc/00_assets/Review_changes.png rename to doc/assets/Review_changes.png diff --git a/doc/00_assets/Right_Detection.png b/doc/assets/Right_Detection.png similarity index 100% rename from doc/00_assets/Right_Detection.png rename to doc/assets/Right_Detection.png diff --git a/doc/00_assets/Right_lane.png b/doc/assets/Right_lane.png similarity index 100% rename from doc/00_assets/Right_lane.png rename to doc/assets/Right_lane.png diff --git a/doc/00_assets/Road0_cutout.png b/doc/assets/Road0_cutout.png similarity index 100% rename from doc/00_assets/Road0_cutout.png rename to doc/assets/Road0_cutout.png diff --git a/doc/00_assets/Stop_sign_OpenDrive.png b/doc/assets/Stop_sign_OpenDrive.png similarity index 100% rename from doc/00_assets/Stop_sign_OpenDrive.png rename to doc/assets/Stop_sign_OpenDrive.png diff --git a/doc/00_assets/Suggestion.png b/doc/assets/Suggestion.png similarity index 100% rename from doc/00_assets/Suggestion.png rename to doc/assets/Suggestion.png diff --git a/doc/00_assets/Super_SM.png b/doc/assets/Super_SM.png similarity index 100% rename from doc/00_assets/Super_SM.png rename to doc/assets/Super_SM.png diff --git a/doc/00_assets/TR01.png b/doc/assets/TR01.png similarity index 100% rename from doc/00_assets/TR01.png rename to doc/assets/TR01.png diff --git a/doc/00_assets/TR02.png b/doc/assets/TR02.png similarity index 100% rename from doc/00_assets/TR02.png rename to doc/assets/TR02.png diff --git a/doc/00_assets/TR03.png b/doc/assets/TR03.png similarity index 100% rename from doc/00_assets/TR03.png rename to doc/assets/TR03.png diff --git a/doc/00_assets/TR04.png b/doc/assets/TR04.png similarity index 100% rename from doc/00_assets/TR04.png 
rename to doc/assets/TR04.png diff --git a/doc/00_assets/TR05.png b/doc/assets/TR05.png similarity index 100% rename from doc/00_assets/TR05.png rename to doc/assets/TR05.png diff --git a/doc/00_assets/TR06.png b/doc/assets/TR06.png similarity index 100% rename from doc/00_assets/TR06.png rename to doc/assets/TR06.png diff --git a/doc/00_assets/TR07.png b/doc/assets/TR07.png similarity index 100% rename from doc/00_assets/TR07.png rename to doc/assets/TR07.png diff --git a/doc/00_assets/TR08.png b/doc/assets/TR08.png similarity index 100% rename from doc/00_assets/TR08.png rename to doc/assets/TR08.png diff --git a/doc/00_assets/TR09.png b/doc/assets/TR09.png similarity index 100% rename from doc/00_assets/TR09.png rename to doc/assets/TR09.png diff --git a/doc/00_assets/TR10.png b/doc/assets/TR10.png similarity index 100% rename from doc/00_assets/TR10.png rename to doc/assets/TR10.png diff --git a/doc/00_assets/TR11.png b/doc/assets/TR11.png similarity index 100% rename from doc/00_assets/TR11.png rename to doc/assets/TR11.png diff --git a/doc/00_assets/TR12.png b/doc/assets/TR12.png similarity index 100% rename from doc/00_assets/TR12.png rename to doc/assets/TR12.png diff --git a/doc/00_assets/TR14.png b/doc/assets/TR14.png similarity index 100% rename from doc/00_assets/TR14.png rename to doc/assets/TR14.png diff --git a/doc/00_assets/TR15.png b/doc/assets/TR15.png similarity index 100% rename from doc/00_assets/TR15.png rename to doc/assets/TR15.png diff --git a/doc/00_assets/TR16.png b/doc/assets/TR16.png similarity index 100% rename from doc/00_assets/TR16.png rename to doc/assets/TR16.png diff --git a/doc/00_assets/TR17.png b/doc/assets/TR17.png similarity index 100% rename from doc/00_assets/TR17.png rename to doc/assets/TR17.png diff --git a/doc/00_assets/TR18.png b/doc/assets/TR18.png similarity index 100% rename from doc/00_assets/TR18.png rename to doc/assets/TR18.png diff --git a/doc/00_assets/TR19.png b/doc/assets/TR19.png similarity index 100% rename from doc/00_assets/TR19.png rename to doc/assets/TR19.png diff --git a/doc/00_assets/TR20.png b/doc/assets/TR20.png similarity index 100% rename from doc/00_assets/TR20.png rename to doc/assets/TR20.png diff --git a/doc/00_assets/TR21.png b/doc/assets/TR21.png similarity index 100% rename from doc/00_assets/TR21.png rename to doc/assets/TR21.png diff --git a/doc/00_assets/TR22.png b/doc/assets/TR22.png similarity index 100% rename from doc/00_assets/TR22.png rename to doc/assets/TR22.png diff --git a/doc/00_assets/TR23.png b/doc/assets/TR23.png similarity index 100% rename from doc/00_assets/TR23.png rename to doc/assets/TR23.png diff --git a/doc/00_assets/Traffic_SM.png b/doc/assets/Traffic_SM.png similarity index 100% rename from doc/00_assets/Traffic_SM.png rename to doc/assets/Traffic_SM.png diff --git a/doc/00_assets/acting/Architecture_Acting.png b/doc/assets/acting/Architecture_Acting.png similarity index 100% rename from doc/00_assets/acting/Architecture_Acting.png rename to doc/assets/acting/Architecture_Acting.png diff --git a/doc/00_assets/acting/Steering_PurePursuit.png b/doc/assets/acting/Steering_PurePursuit.png similarity index 100% rename from doc/00_assets/acting/Steering_PurePursuit.png rename to doc/assets/acting/Steering_PurePursuit.png diff --git a/doc/00_assets/acting/Steering_PurePursuit_Tuning.png b/doc/assets/acting/Steering_PurePursuit_Tuning.png similarity index 100% rename from doc/00_assets/acting/Steering_PurePursuit_Tuning.png rename to doc/assets/acting/Steering_PurePursuit_Tuning.png diff --git 
a/doc/00_assets/acting/Steering_Stanley.png b/doc/assets/acting/Steering_Stanley.png similarity index 100% rename from doc/00_assets/acting/Steering_Stanley.png rename to doc/assets/acting/Steering_Stanley.png diff --git a/doc/00_assets/acting/Steering_Stanley_ComparedToPurePur.png b/doc/assets/acting/Steering_Stanley_ComparedToPurePur.png similarity index 100% rename from doc/00_assets/acting/Steering_Stanley_ComparedToPurePur.png rename to doc/assets/acting/Steering_Stanley_ComparedToPurePur.png diff --git a/doc/00_assets/acting/VelContr_PID_BrakingWithThrottlePID.png b/doc/assets/acting/VelContr_PID_BrakingWithThrottlePID.png similarity index 100% rename from doc/00_assets/acting/VelContr_PID_BrakingWithThrottlePID.png rename to doc/assets/acting/VelContr_PID_BrakingWithThrottlePID.png diff --git a/doc/00_assets/acting/VelContr_PID_StepResponse.png b/doc/assets/acting/VelContr_PID_StepResponse.png similarity index 100% rename from doc/00_assets/acting/VelContr_PID_StepResponse.png rename to doc/assets/acting/VelContr_PID_StepResponse.png diff --git a/doc/00_assets/acting/VelContr_PID_differentVelocities.png b/doc/assets/acting/VelContr_PID_differentVelocities.png similarity index 100% rename from doc/00_assets/acting/VelContr_PID_differentVelocities.png rename to doc/assets/acting/VelContr_PID_differentVelocities.png diff --git a/doc/00_assets/acting/emergency_brake_stats_graph.png b/doc/assets/acting/emergency_brake_stats_graph.png similarity index 100% rename from doc/00_assets/acting/emergency_brake_stats_graph.png rename to doc/assets/acting/emergency_brake_stats_graph.png diff --git a/doc/00_assets/acting/main_frame_publisher_bug.png b/doc/assets/acting/main_frame_publisher_bug.png similarity index 100% rename from doc/00_assets/acting/main_frame_publisher_bug.png rename to doc/assets/acting/main_frame_publisher_bug.png diff --git a/doc/00_assets/behaviour_tree.png b/doc/assets/behaviour_tree.png similarity index 100% rename from doc/00_assets/behaviour_tree.png rename to doc/assets/behaviour_tree.png diff --git a/doc/00_assets/berechnungsmodell.png b/doc/assets/berechnungsmodell.png similarity index 100% rename from doc/00_assets/berechnungsmodell.png rename to doc/assets/berechnungsmodell.png diff --git a/doc/00_assets/branch_overview.png b/doc/assets/branch_overview.png similarity index 100% rename from doc/00_assets/branch_overview.png rename to doc/assets/branch_overview.png diff --git a/doc/00_assets/bug_template.png b/doc/assets/bug_template.png similarity index 100% rename from doc/00_assets/bug_template.png rename to doc/assets/bug_template.png diff --git a/doc/00_assets/create_issue.png b/doc/assets/create_issue.png similarity index 100% rename from doc/00_assets/create_issue.png rename to doc/assets/create_issue.png diff --git a/doc/00_assets/distance_visualization.png b/doc/assets/distance_visualization.png similarity index 100% rename from doc/00_assets/distance_visualization.png rename to doc/assets/distance_visualization.png diff --git a/doc/00_assets/efficientps_structure.png b/doc/assets/efficientps_structure.png similarity index 100% rename from doc/00_assets/efficientps_structure.png rename to doc/assets/efficientps_structure.png diff --git a/doc/00_assets/fahrzeugapproximation.png b/doc/assets/fahrzeugapproximation.png similarity index 100% rename from doc/00_assets/fahrzeugapproximation.png rename to doc/assets/fahrzeugapproximation.png diff --git a/doc/00_assets/fahrzeugpositionsberechnung.png b/doc/assets/fahrzeugpositionsberechnung.png similarity index 100% 
rename from doc/00_assets/fahrzeugpositionsberechnung.png rename to doc/assets/fahrzeugpositionsberechnung.png diff --git a/doc/00_assets/fahrzeugwinkelberechnung.png b/doc/assets/fahrzeugwinkelberechnung.png similarity index 100% rename from doc/00_assets/fahrzeugwinkelberechnung.png rename to doc/assets/fahrzeugwinkelberechnung.png diff --git a/doc/00_assets/feature_template.png b/doc/assets/feature_template.png similarity index 100% rename from doc/00_assets/feature_template.png rename to doc/assets/feature_template.png diff --git a/doc/00_assets/filter_img/avg_10_w_0_500.png b/doc/assets/filter_img/avg_10_w_0_500.png similarity index 100% rename from doc/00_assets/filter_img/avg_10_w_0_500.png rename to doc/assets/filter_img/avg_10_w_0_500.png diff --git a/doc/00_assets/filter_img/avg_10_w_0_750.png b/doc/assets/filter_img/avg_10_w_0_750.png similarity index 100% rename from doc/00_assets/filter_img/avg_10_w_0_750.png rename to doc/assets/filter_img/avg_10_w_0_750.png diff --git a/doc/00_assets/filter_img/avg_10_w_1_000.png b/doc/assets/filter_img/avg_10_w_1_000.png similarity index 100% rename from doc/00_assets/filter_img/avg_10_w_1_000.png rename to doc/assets/filter_img/avg_10_w_1_000.png diff --git a/doc/00_assets/filter_img/avg_1_w_0_500.png b/doc/assets/filter_img/avg_1_w_0_500.png similarity index 100% rename from doc/00_assets/filter_img/avg_1_w_0_500.png rename to doc/assets/filter_img/avg_1_w_0_500.png diff --git a/doc/00_assets/filter_img/avg_1_w_0_750.png b/doc/assets/filter_img/avg_1_w_0_750.png similarity index 100% rename from doc/00_assets/filter_img/avg_1_w_0_750.png rename to doc/assets/filter_img/avg_1_w_0_750.png diff --git a/doc/00_assets/filter_img/avg_1_w_1_000.png b/doc/assets/filter_img/avg_1_w_1_000.png similarity index 100% rename from doc/00_assets/filter_img/avg_1_w_1_000.png rename to doc/assets/filter_img/avg_1_w_1_000.png diff --git a/doc/00_assets/filter_img/avg_20_w_0_750.png b/doc/assets/filter_img/avg_20_w_0_750.png similarity index 100% rename from doc/00_assets/filter_img/avg_20_w_0_750.png rename to doc/assets/filter_img/avg_20_w_0_750.png diff --git a/doc/00_assets/filter_img/avg_7_w_0_500.png b/doc/assets/filter_img/avg_7_w_0_500.png similarity index 100% rename from doc/00_assets/filter_img/avg_7_w_0_500.png rename to doc/assets/filter_img/avg_7_w_0_500.png diff --git a/doc/00_assets/filter_img/avg_7_w_0_750.png b/doc/assets/filter_img/avg_7_w_0_750.png similarity index 100% rename from doc/00_assets/filter_img/avg_7_w_0_750.png rename to doc/assets/filter_img/avg_7_w_0_750.png diff --git a/doc/00_assets/filter_img/avg_7_w_1_000.png b/doc/assets/filter_img/avg_7_w_1_000.png similarity index 100% rename from doc/00_assets/filter_img/avg_7_w_1_000.png rename to doc/assets/filter_img/avg_7_w_1_000.png diff --git a/doc/00_assets/filter_img/rolling_avg_1.png b/doc/assets/filter_img/rolling_avg_1.png similarity index 100% rename from doc/00_assets/filter_img/rolling_avg_1.png rename to doc/assets/filter_img/rolling_avg_1.png diff --git a/doc/00_assets/filter_img/rolling_avg_10.png b/doc/assets/filter_img/rolling_avg_10.png similarity index 100% rename from doc/00_assets/filter_img/rolling_avg_10.png rename to doc/assets/filter_img/rolling_avg_10.png diff --git a/doc/00_assets/filter_img/rolling_avg_20.png b/doc/assets/filter_img/rolling_avg_20.png similarity index 100% rename from doc/00_assets/filter_img/rolling_avg_20.png rename to doc/assets/filter_img/rolling_avg_20.png diff --git a/doc/00_assets/filter_img/rolling_avg_5.png 
b/doc/assets/filter_img/rolling_avg_5.png similarity index 100% rename from doc/00_assets/filter_img/rolling_avg_5.png rename to doc/assets/filter_img/rolling_avg_5.png diff --git a/doc/00_assets/gdrive-paf.png b/doc/assets/gdrive-paf.png similarity index 100% rename from doc/00_assets/gdrive-paf.png rename to doc/assets/gdrive-paf.png diff --git a/doc/00_assets/gdrive-permissions.png b/doc/assets/gdrive-permissions.png similarity index 100% rename from doc/00_assets/gdrive-permissions.png rename to doc/assets/gdrive-permissions.png diff --git a/doc/00_assets/gewinnerteam19-architektur.png b/doc/assets/gewinnerteam19-architektur.png similarity index 100% rename from doc/00_assets/gewinnerteam19-architektur.png rename to doc/assets/gewinnerteam19-architektur.png diff --git a/doc/00_assets/git-flow.svg b/doc/assets/git-flow.svg similarity index 100% rename from doc/00_assets/git-flow.svg rename to doc/assets/git-flow.svg diff --git a/doc/00_assets/github-action-md.png b/doc/assets/github-action-md.png similarity index 100% rename from doc/00_assets/github-action-md.png rename to doc/assets/github-action-md.png diff --git a/doc/00_assets/github-action-py.png b/doc/assets/github-action-py.png similarity index 100% rename from doc/00_assets/github-action-py.png rename to doc/assets/github-action-py.png diff --git a/doc/00_assets/github_create_a_branch.png b/doc/assets/github_create_a_branch.png similarity index 100% rename from doc/00_assets/github_create_a_branch.png rename to doc/assets/github_create_a_branch.png diff --git a/doc/00_assets/global_trajectory.png b/doc/assets/global_trajectory.png similarity index 100% rename from doc/00_assets/global_trajectory.png rename to doc/assets/global_trajectory.png diff --git a/doc/00_assets/gnss_ohne_rolling_average.png b/doc/assets/gnss_ohne_rolling_average.png similarity index 100% rename from doc/00_assets/gnss_ohne_rolling_average.png rename to doc/assets/gnss_ohne_rolling_average.png diff --git a/doc/00_assets/implementation_plan_perception.jpg b/doc/assets/implementation_plan_perception.jpg similarity index 100% rename from doc/00_assets/implementation_plan_perception.jpg rename to doc/assets/implementation_plan_perception.jpg diff --git a/doc/00_assets/intersection.png b/doc/assets/intersection.png similarity index 100% rename from doc/00_assets/intersection.png rename to doc/assets/intersection.png diff --git a/doc/00_assets/intersection_2.png b/doc/assets/intersection_2.png similarity index 100% rename from doc/00_assets/intersection_2.png rename to doc/assets/intersection_2.png diff --git a/doc/00_assets/issue_wizard.png b/doc/assets/issue_wizard.png similarity index 100% rename from doc/00_assets/issue_wizard.png rename to doc/assets/issue_wizard.png diff --git a/doc/00_assets/junction.png b/doc/assets/junction.png similarity index 100% rename from doc/00_assets/junction.png rename to doc/assets/junction.png diff --git a/doc/00_assets/kollisionsberechnung.png b/doc/assets/kollisionsberechnung.png similarity index 100% rename from doc/00_assets/kollisionsberechnung.png rename to doc/assets/kollisionsberechnung.png diff --git a/doc/00_assets/kreuzungszonen.png b/doc/assets/kreuzungszonen.png similarity index 100% rename from doc/00_assets/kreuzungszonen.png rename to doc/assets/kreuzungszonen.png diff --git a/doc/00_assets/lane_midpoint.png b/doc/assets/lane_midpoint.png similarity index 100% rename from doc/00_assets/lane_midpoint.png rename to doc/assets/lane_midpoint.png diff --git a/doc/00_assets/leaderboard-1.png 
b/doc/assets/leaderboard-1.png similarity index 100% rename from doc/00_assets/leaderboard-1.png rename to doc/assets/leaderboard-1.png diff --git a/doc/00_assets/leaderboard-2.png b/doc/assets/leaderboard-2.png similarity index 100% rename from doc/00_assets/leaderboard-2.png rename to doc/assets/leaderboard-2.png diff --git a/doc/00_assets/legend_bt.png b/doc/assets/legend_bt.png similarity index 100% rename from doc/00_assets/legend_bt.png rename to doc/assets/legend_bt.png diff --git a/doc/00_assets/lidar_filter.png b/doc/assets/lidar_filter.png similarity index 100% rename from doc/00_assets/lidar_filter.png rename to doc/assets/lidar_filter.png diff --git a/doc/00_assets/lidarhinderniserkennung.png b/doc/assets/lidarhinderniserkennung.png similarity index 100% rename from doc/00_assets/lidarhinderniserkennung.png rename to doc/assets/lidarhinderniserkennung.png diff --git a/doc/00_assets/local_trajectory.png b/doc/assets/local_trajectory.png similarity index 100% rename from doc/00_assets/local_trajectory.png rename to doc/assets/local_trajectory.png diff --git a/doc/00_assets/multi_lane.png b/doc/assets/multi_lane.png similarity index 100% rename from doc/00_assets/multi_lane.png rename to doc/assets/multi_lane.png diff --git a/doc/00_assets/nvcc_version.png b/doc/assets/nvcc_version.png similarity index 100% rename from doc/00_assets/nvcc_version.png rename to doc/assets/nvcc_version.png diff --git a/doc/00_assets/occupancygrid.png b/doc/assets/occupancygrid.png similarity index 100% rename from doc/00_assets/occupancygrid.png rename to doc/assets/occupancygrid.png diff --git a/doc/00_assets/optimierungsvisualisierung.png b/doc/assets/optimierungsvisualisierung.png similarity index 100% rename from doc/00_assets/optimierungsvisualisierung.png rename to doc/assets/optimierungsvisualisierung.png diff --git a/doc/00_assets/overtaking_overview.png b/doc/assets/overtaking_overview.png similarity index 100% rename from doc/00_assets/overtaking_overview.png rename to doc/assets/overtaking_overview.png diff --git a/doc/00_assets/overview.jpg b/doc/assets/overview.jpg similarity index 100% rename from doc/00_assets/overview.jpg rename to doc/assets/overview.jpg diff --git a/doc/00_assets/perception/adding_new_position_methods.png b/doc/assets/perception/adding_new_position_methods.png similarity index 100% rename from doc/00_assets/perception/adding_new_position_methods.png rename to doc/assets/perception/adding_new_position_methods.png diff --git a/doc/00_assets/perception/data_26_MAE_Boxed.png b/doc/assets/perception/data_26_MAE_Boxed.png similarity index 100% rename from doc/00_assets/perception/data_26_MAE_Boxed.png rename to doc/assets/perception/data_26_MAE_Boxed.png diff --git a/doc/00_assets/perception/data_26_MSE_Boxed.png b/doc/assets/perception/data_26_MSE_Boxed.png similarity index 100% rename from doc/00_assets/perception/data_26_MSE_Boxed.png rename to doc/assets/perception/data_26_MSE_Boxed.png diff --git a/doc/00_assets/perception/kalman_installation_guide.png b/doc/assets/perception/kalman_installation_guide.png similarity index 100% rename from doc/00_assets/perception/kalman_installation_guide.png rename to doc/assets/perception/kalman_installation_guide.png diff --git a/doc/00_assets/perception/modular_subscriber_example.png b/doc/assets/perception/modular_subscriber_example.png similarity index 100% rename from doc/00_assets/perception/modular_subscriber_example.png rename to doc/assets/perception/modular_subscriber_example.png diff --git 
a/doc/00_assets/perception/new_heading_pub_example.png b/doc/assets/perception/new_heading_pub_example.png similarity index 100% rename from doc/00_assets/perception/new_heading_pub_example.png rename to doc/assets/perception/new_heading_pub_example.png diff --git a/doc/00_assets/perception/non_linear_kalman_example.png b/doc/assets/perception/non_linear_kalman_example.png similarity index 100% rename from doc/00_assets/perception/non_linear_kalman_example.png rename to doc/assets/perception/non_linear_kalman_example.png diff --git a/doc/00_assets/perception/quat_to_angle.png b/doc/assets/perception/quat_to_angle.png similarity index 100% rename from doc/00_assets/perception/quat_to_angle.png rename to doc/assets/perception/quat_to_angle.png diff --git a/doc/00_assets/perception/sensor_debug_change.png b/doc/assets/perception/sensor_debug_change.png similarity index 100% rename from doc/00_assets/perception/sensor_debug_change.png rename to doc/assets/perception/sensor_debug_change.png diff --git a/doc/00_assets/perception/sensor_debug_data_saving.png b/doc/assets/perception/sensor_debug_data_saving.png similarity index 100% rename from doc/00_assets/perception/sensor_debug_data_saving.png rename to doc/assets/perception/sensor_debug_data_saving.png diff --git a/doc/00_assets/perception/sensor_debug_viz_config.png b/doc/assets/perception/sensor_debug_viz_config.png similarity index 100% rename from doc/00_assets/perception/sensor_debug_viz_config.png rename to doc/assets/perception/sensor_debug_viz_config.png diff --git "a/doc/00_assets/planning \303\274bersicht.png" "b/doc/assets/planning \303\274bersicht.png" similarity index 100% rename from "doc/00_assets/planning \303\274bersicht.png" rename to "doc/assets/planning \303\274bersicht.png" diff --git a/doc/00_assets/planning/BT_paper.png b/doc/assets/planning/BT_paper.png similarity index 100% rename from doc/00_assets/planning/BT_paper.png rename to doc/assets/planning/BT_paper.png diff --git a/doc/00_assets/planning/BehaviorTree_medium.png b/doc/assets/planning/BehaviorTree_medium.png similarity index 100% rename from doc/00_assets/planning/BehaviorTree_medium.png rename to doc/assets/planning/BehaviorTree_medium.png diff --git a/doc/00_assets/planning/Globalplan.png b/doc/assets/planning/Globalplan.png similarity index 100% rename from doc/00_assets/planning/Globalplan.png rename to doc/assets/planning/Globalplan.png diff --git a/doc/00_assets/planning/Overtake_car_trajectory.png b/doc/assets/planning/Overtake_car_trajectory.png similarity index 100% rename from doc/00_assets/planning/Overtake_car_trajectory.png rename to doc/assets/planning/Overtake_car_trajectory.png diff --git a/doc/00_assets/planning/Planning.png b/doc/assets/planning/Planning.png similarity index 100% rename from doc/00_assets/planning/Planning.png rename to doc/assets/planning/Planning.png diff --git a/doc/00_assets/planning/Planning_architecture.png b/doc/assets/planning/Planning_architecture.png similarity index 100% rename from doc/00_assets/planning/Planning_architecture.png rename to doc/assets/planning/Planning_architecture.png diff --git a/doc/00_assets/planning/Planning_paf21.png b/doc/assets/planning/Planning_paf21.png similarity index 100% rename from doc/00_assets/planning/Planning_paf21.png rename to doc/assets/planning/Planning_paf21.png diff --git a/doc/00_assets/planning/collision_check.png b/doc/assets/planning/collision_check.png similarity index 100% rename from doc/00_assets/planning/collision_check.png rename to 
doc/assets/planning/collision_check.png diff --git a/doc/00_assets/planning/intersection_scenario.png b/doc/assets/planning/intersection_scenario.png similarity index 100% rename from doc/00_assets/planning/intersection_scenario.png rename to doc/assets/planning/intersection_scenario.png diff --git a/doc/00_assets/planning/localplan.png b/doc/assets/planning/localplan.png similarity index 100% rename from doc/00_assets/planning/localplan.png rename to doc/assets/planning/localplan.png diff --git a/doc/00_assets/planning/overtaking_scenario.png b/doc/assets/planning/overtaking_scenario.png similarity index 100% rename from doc/00_assets/planning/overtaking_scenario.png rename to doc/assets/planning/overtaking_scenario.png diff --git a/doc/00_assets/planning/overview.jpg b/doc/assets/planning/overview.jpg similarity index 100% rename from doc/00_assets/planning/overview.jpg rename to doc/assets/planning/overview.jpg diff --git a/doc/00_assets/planning/overview.png b/doc/assets/planning/overview.png similarity index 100% rename from doc/00_assets/planning/overview.png rename to doc/assets/planning/overview.png diff --git a/doc/00_assets/planning/overview_paper1.png b/doc/assets/planning/overview_paper1.png similarity index 100% rename from doc/00_assets/planning/overview_paper1.png rename to doc/assets/planning/overview_paper1.png diff --git a/doc/00_assets/planning/plot_full_trajectory_1_degree.png b/doc/assets/planning/plot_full_trajectory_1_degree.png similarity index 100% rename from doc/00_assets/planning/plot_full_trajectory_1_degree.png rename to doc/assets/planning/plot_full_trajectory_1_degree.png diff --git a/doc/00_assets/planning/prios.png b/doc/assets/planning/prios.png similarity index 100% rename from doc/00_assets/planning/prios.png rename to doc/assets/planning/prios.png diff --git a/doc/00_assets/planning/simple_final_tree.png b/doc/assets/planning/simple_final_tree.png similarity index 100% rename from doc/00_assets/planning/simple_final_tree.png rename to doc/assets/planning/simple_final_tree.png diff --git a/doc/00_assets/planning/test_frenet_results.png b/doc/assets/planning/test_frenet_results.png similarity index 100% rename from doc/00_assets/planning/test_frenet_results.png rename to doc/assets/planning/test_frenet_results.png diff --git a/doc/00_assets/planning/three_scenarios.png b/doc/assets/planning/three_scenarios.png similarity index 100% rename from doc/00_assets/planning/three_scenarios.png rename to doc/assets/planning/three_scenarios.png diff --git a/doc/00_assets/planning/vector_calculation.png b/doc/assets/planning/vector_calculation.png similarity index 100% rename from doc/00_assets/planning/vector_calculation.png rename to doc/assets/planning/vector_calculation.png diff --git a/doc/00_assets/planning/vision_objects_filter_cc.png b/doc/assets/planning/vision_objects_filter_cc.png similarity index 100% rename from doc/00_assets/planning/vision_objects_filter_cc.png rename to doc/assets/planning/vision_objects_filter_cc.png diff --git a/doc/00_assets/positionsvektor.png b/doc/assets/positionsvektor.png similarity index 100% rename from doc/00_assets/positionsvektor.png rename to doc/assets/positionsvektor.png diff --git a/doc/00_assets/preplanning_start.png b/doc/assets/preplanning_start.png similarity index 100% rename from doc/00_assets/preplanning_start.png rename to doc/assets/preplanning_start.png diff --git a/doc/00_assets/pytree_PAF_status.drawio.png b/doc/assets/pytree_PAF_status.drawio.png similarity index 100% rename from 
doc/00_assets/pytree_PAF_status.drawio.png rename to doc/assets/pytree_PAF_status.drawio.png diff --git a/doc/00_assets/reference.png b/doc/assets/reference.png similarity index 100% rename from doc/00_assets/reference.png rename to doc/assets/reference.png diff --git a/doc/00_assets/reference_xodr.png b/doc/assets/reference_xodr.png similarity index 100% rename from doc/00_assets/reference_xodr.png rename to doc/assets/reference_xodr.png diff --git a/doc/00_assets/research_assets/bicyclegeometry.png b/doc/assets/research_assets/bicyclegeometry.png similarity index 100% rename from doc/00_assets/research_assets/bicyclegeometry.png rename to doc/assets/research_assets/bicyclegeometry.png diff --git a/doc/00_assets/research_assets/chattering.gif b/doc/assets/research_assets/chattering.gif similarity index 100% rename from doc/00_assets/research_assets/chattering.gif rename to doc/assets/research_assets/chattering.gif diff --git a/doc/00_assets/research_assets/curve_detection_paf21_1.png b/doc/assets/research_assets/curve_detection_paf21_1.png similarity index 100% rename from doc/00_assets/research_assets/curve_detection_paf21_1.png rename to doc/assets/research_assets/curve_detection_paf21_1.png diff --git a/doc/00_assets/research_assets/danglingcarrotgeometry.png b/doc/assets/research_assets/danglingcarrotgeometry.png similarity index 100% rename from doc/00_assets/research_assets/danglingcarrotgeometry.png rename to doc/assets/research_assets/danglingcarrotgeometry.png diff --git a/doc/00_assets/research_assets/messages_paf21_2.png b/doc/assets/research_assets/messages_paf21_2.png similarity index 100% rename from doc/00_assets/research_assets/messages_paf21_2.png rename to doc/assets/research_assets/messages_paf21_2.png diff --git a/doc/00_assets/research_assets/mpc.png b/doc/assets/research_assets/mpc.png similarity index 100% rename from doc/00_assets/research_assets/mpc.png rename to doc/assets/research_assets/mpc.png diff --git a/doc/00_assets/research_assets/pure_pursuit.png b/doc/assets/research_assets/pure_pursuit.png similarity index 100% rename from doc/00_assets/research_assets/pure_pursuit.png rename to doc/assets/research_assets/pure_pursuit.png diff --git a/doc/00_assets/research_assets/standard_routine_paf21_2.png b/doc/assets/research_assets/standard_routine_paf21_2.png similarity index 100% rename from doc/00_assets/research_assets/standard_routine_paf21_2.png rename to doc/assets/research_assets/standard_routine_paf21_2.png diff --git a/doc/00_assets/research_assets/stanley_controller.png b/doc/assets/research_assets/stanley_controller.png similarity index 100% rename from doc/00_assets/research_assets/stanley_controller.png rename to doc/assets/research_assets/stanley_controller.png diff --git a/doc/00_assets/research_assets/stanley_paf21_1.png b/doc/assets/research_assets/stanley_paf21_1.png similarity index 100% rename from doc/00_assets/research_assets/stanley_paf21_1.png rename to doc/assets/research_assets/stanley_paf21_1.png diff --git a/doc/00_assets/research_assets/stanleyerror.png b/doc/assets/research_assets/stanleyerror.png similarity index 100% rename from doc/00_assets/research_assets/stanleyerror.png rename to doc/assets/research_assets/stanleyerror.png diff --git a/doc/00_assets/road_option.png b/doc/assets/road_option.png similarity index 100% rename from doc/00_assets/road_option.png rename to doc/assets/road_option.png diff --git a/doc/00_assets/road_options_concept.png b/doc/assets/road_options_concept.png similarity index 100% rename from 
doc/00_assets/road_options_concept.png rename to doc/assets/road_options_concept.png diff --git a/doc/00_assets/roads_vis.png b/doc/assets/roads_vis.png similarity index 100% rename from doc/00_assets/roads_vis.png rename to doc/assets/roads_vis.png diff --git a/doc/00_assets/segmentation.png b/doc/assets/segmentation.png similarity index 100% rename from doc/00_assets/segmentation.png rename to doc/assets/segmentation.png diff --git a/doc/00_assets/sensoranordnung.png b/doc/assets/sensoranordnung.png similarity index 100% rename from doc/00_assets/sensoranordnung.png rename to doc/assets/sensoranordnung.png diff --git a/doc/00_assets/statemachines.png b/doc/assets/statemachines.png similarity index 100% rename from doc/00_assets/statemachines.png rename to doc/assets/statemachines.png diff --git a/doc/00_assets/top-level.png b/doc/assets/top-level.png similarity index 100% rename from doc/00_assets/top-level.png rename to doc/assets/top-level.png diff --git a/doc/00_assets/trajectory_roads.png b/doc/assets/trajectory_roads.png similarity index 100% rename from doc/00_assets/trajectory_roads.png rename to doc/assets/trajectory_roads.png diff --git a/doc/00_assets/trajekorienfehlermin.png b/doc/assets/trajekorienfehlermin.png similarity index 100% rename from doc/00_assets/trajekorienfehlermin.png rename to doc/assets/trajekorienfehlermin.png diff --git a/doc/00_assets/trajektorienberechnung.png b/doc/assets/trajektorienberechnung.png similarity index 100% rename from doc/00_assets/trajektorienberechnung.png rename to doc/assets/trajektorienberechnung.png diff --git a/doc/assets/vulkan_device_not_available.png b/doc/assets/vulkan_device_not_available.png new file mode 100644 index 00000000..f0aa64d2 Binary files /dev/null and b/doc/assets/vulkan_device_not_available.png differ diff --git a/doc/08_dev_talks/paf23/sprint_1.md b/doc/dev_talks/paf23/sprint_1.md similarity index 100% rename from doc/08_dev_talks/paf23/sprint_1.md rename to doc/dev_talks/paf23/sprint_1.md diff --git a/doc/08_dev_talks/paf23/sprint_2.md b/doc/dev_talks/paf23/sprint_2.md similarity index 100% rename from doc/08_dev_talks/paf23/sprint_2.md rename to doc/dev_talks/paf23/sprint_2.md diff --git a/doc/08_dev_talks/paf23/sprint_3.md b/doc/dev_talks/paf23/sprint_3.md similarity index 100% rename from doc/08_dev_talks/paf23/sprint_3.md rename to doc/dev_talks/paf23/sprint_3.md diff --git a/doc/08_dev_talks/paf23/sprint_4.md b/doc/dev_talks/paf23/sprint_4.md similarity index 100% rename from doc/08_dev_talks/paf23/sprint_4.md rename to doc/dev_talks/paf23/sprint_4.md diff --git a/doc/08_dev_talks/paf23/sprint_5.md b/doc/dev_talks/paf23/sprint_5.md similarity index 100% rename from doc/08_dev_talks/paf23/sprint_5.md rename to doc/dev_talks/paf23/sprint_5.md diff --git a/doc/08_dev_talks/paf23/sprint_6.md b/doc/dev_talks/paf23/sprint_6.md similarity index 100% rename from doc/08_dev_talks/paf23/sprint_6.md rename to doc/dev_talks/paf23/sprint_6.md diff --git a/doc/08_dev_talks/paf23/sprint_7.md b/doc/dev_talks/paf23/sprint_7.md similarity index 100% rename from doc/08_dev_talks/paf23/sprint_7.md rename to doc/dev_talks/paf23/sprint_7.md diff --git a/doc/08_dev_talks/paf24/mermaid_paf24.md b/doc/dev_talks/paf24/mermaid_paf24.md similarity index 100% rename from doc/08_dev_talks/paf24/mermaid_paf24.md rename to doc/dev_talks/paf24/mermaid_paf24.md diff --git a/doc/08_dev_talks/paf24/student_roles24.md b/doc/dev_talks/paf24/student_roles24.md similarity index 86% rename from doc/08_dev_talks/paf24/student_roles24.md rename to 
doc/dev_talks/paf24/student_roles24.md index 552005c2..2822486c 100644 --- a/doc/08_dev_talks/paf24/student_roles24.md +++ b/doc/dev_talks/paf24/student_roles24.md @@ -2,13 +2,15 @@ ## Role overview -2-3 Students per Role +2-4 Students per Role - **Systems Engineer** - Oversee the entire development process, ensuring smooth interaction between different subsystems (perception, planning, control, decision-making, etc.). - Define system-level architecture, ensuring each module (e.g., sensors, planning, control) interacts through well-defined interfaces. - Manage requirements (e.g. in issues) and ensure each team's outputs align with the overall system goals, including performance, reliability, and safety standards. - Serve as the point of contact for inter-team communication, ensuring alignment between roles such as Perception Engineers, Control Engineers, and Decision-Making Engineers. + - Take responsibility for identifying and managing dependencies between subsystems and methods, especially in relation to the timeline. Ensure that the sequence of development is logical and efficient, avoiding resource investment in features that rely on unfinished or unavailable modules. + - For example, avoid focusing efforts on decision-making algorithms that depend on perception data (e.g., stop lines) if it’s clear that the sensors or detection mechanisms won't be ready until later stages of the project. - Develop and enforce a systems integration strategy that covers continuous testing, validation, and verification of the autonomous driving stack. - Ensure proper data flow between modules using middleware (e.g., ROS). - Define and monitor key performance indicators (KPIs) for each subsystem, ensuring they collectively meet reliability, stability, and safety goals. @@ -25,14 +27,6 @@ - Collaborate with perception, planning, and control engineers to ensure the decision-making module aligns with the data and actions generated by other subsystems. - Simulate and validate decision-making in various complex driving scenarios within CARLA, such as navigating congested traffic or adverse weather conditions. - Ensure decision-making algorithms are interpretable and explainable to enhance debugging and safety validation. -- **Machine Learning Engineer** - - Implement machine learning techniques (e.g., deep learning, reinforcement learning) to improve various subsystems in the autonomous driving stack. - - Train neural networks for perception tasks (e.g., image segmentation, object detection, classification) using both simulated and real-world datasets. - - Develop and optimize behavior cloning, imitation learning, or other algorithms to enable the vehicle to learn from human driving examples. - - Integrate machine learning models into the perception or decision-making pipeline, ensuring smooth interaction with other system components. - - Collaborate with Perception Engineers to fine-tune sensor fusion models using AI techniques for improved environmental understanding. - - Analyze model performance and iteratively improve accuracy, efficiency, and real-time processing capability. - - Monitor and manage the data pipeline for model training, ensuring data quality, labeling accuracy, and sufficient coverage of edge cases. - **Perception Engineer** - Develop and improve sensor models (e.g., camera, LiDAR, radar) within the simulation, ensuring realistic sensor behavior and noise characteristics. 
- Implement state-of-the-art object detection, tracking, and sensor fusion algorithms to accurately interpret environmental data. @@ -57,7 +51,7 @@ - Ensure path planning algorithms balance safety, efficiency, and passenger comfort while maintaining vehicle controllability. - **Control Systems Engineer** - Work on the low-level control of the vehicle, including steering, throttle, braking, and handling. - - Implement advanced control algorithms (e.g., PID, MPC) to ensure the vehicle follows planned paths with stability and precision. + - Implement advanced control algorithms (e.g., MPC) to ensure the vehicle follows planned paths with stability and precision. - Tune control parameters to ensure smooth and reliable vehicle behavior under dynamic environmental conditions. - Collaborate with Path Planning Engineers to translate high-level paths into precise control actions. - Ensure the control system reacts dynamically to changes in the environment (e.g., obstacles, traffic conditions). @@ -82,19 +76,16 @@ graph TD SE[Systems Engineer] --> DME[Decision-Making Engineer] SE --> PE[Perception Engineer] - SE --> MLE[Machine Learning Engineer] SE --> LME[Localization and Mapping Engineer] SE --> PPE[Path Planning Engineer] SE --> CSE[Control Systems Engineer] - SE --> TVE[Testing and Validation Engineer] + SE --> TVE[Testing and Validation Eng.] SE --> IE[Infrastructure Engineer] DME --> PE DME --> PPE DME --> CSE - DME --> MLE - - PE <--> MLE + PE --> LME PE --> PPE @@ -108,14 +99,12 @@ graph TD IE --> SE IE --> TVE - IE --> MLE LME --> DME subgraph Module Teams DME PE - MLE LME PPE CSE diff --git a/doc/development/README.md b/doc/development/README.md new file mode 100644 index 00000000..75a99b4c --- /dev/null +++ b/doc/development/README.md @@ -0,0 +1,35 @@ +# Development guidelines + +If you contribute to this project, please read the following guidelines first: + +1. [Documentation Requirements](./documentation_requirements.md) +2. [Linting](./linting.md) +3. [Coding style](./coding_style.md) +4. [Git Style](./git_workflow.md) +5. [Reviewing](./review_guideline.md) +6. [Project management](./project_management.md) +7. GitHub actions + 1. [linting action](./linter_action.md) + 2. [build action](./build_action.md) +8. [Install python packages](./installing_python_packages.md) +9. [Discord Webhook Documentation](./discord_webhook.md) + +## Templates + +Templates for documentation and code are provided in [`doc/development/templates`](./templates). + +### [`template_class.py`](./templates/template_class.py) + +Use this class if you don't have much experience with Python. If you just want to copy an empty class, use `template_class_no_comments.py`. + +### [`template_class_no_comments.py`](./templates/template_class_no_comments.py) + +If you just want to copy an empty class, use this class. + +### [`template_wiki_page.md`](./templates/template_wiki_page.md) + +This template serves as a guide for how to build knowledge articles that everyone can understand. The basic structure should be kept for all articles. This template is empty and can be used as-is.
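For orientation only, a class skeleton in the spirit of these templates might look as follows; this is a hedged sketch, not the actual content of `template_class.py` (refer to the template files themselves for the project's conventions):

```python
class TemplateClass:
    """One-line summary of what this class does.

    A longer description of the class can follow here.
    """

    def __init__(self, name: str):
        """Set up the object.

        :param name: descriptive name of this instance
        """
        self.name = name

    def run(self) -> None:
        """Describe what the main entry point does."""
        pass
```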
+
+## Discord Webhook
+
+[Discord Webhook Documentation](./discord_webhook.md)
diff --git a/doc/02_development/10_build_action.md b/doc/development/build_action.md
similarity index 82%
rename from doc/02_development/10_build_action.md
rename to doc/development/build_action.md
index c72b6488..77f71011 100644
--- a/doc/02_development/10_build_action.md
+++ b/doc/development/build_action.md
@@ -7,23 +7,7 @@
 - create an executable image of our work
 - evaluate our Agent with the leaderboard

----
-
-## Authors
-
-Tim Dreier, Korbinian Stein
-
-## Date
-
-2.12.2022
-
-## Table of contents
-
-- [GitHub actions](#github-actions)
-  - [Authors](#authors)
-  - [Date](#date)
-  - [Table of contents](#table-of-contents)
   - [General](#general)
   - [The Dockerfile (`build/docker/build/Dockerfile`)](#the-dockerfile-builddockerbuilddockerfile)
@@ -32,7 +16,6 @@
   - [2. Set up Docker Buildx (`docker/setup-buildx-action@v2`)](#2-set-up-docker-buildx-dockersetup-buildx-actionv2)
   - [3. Log in to the Container registry (`docker/login-action@v2`)](#3-log-in-to-the-container-registry-dockerlogin-actionv2)
   - [4. Bump version and push tag (`mathieudutour/github-tag-action`)](#4-bump-version-and-push-tag-mathieudutourgithub-tag-action)
-    - [Example](#example)
   - [5. Get commit hash](#5-get-commit-hash)
   - [6. Build and push Docker image](#6-build-and-push-docker-image)
   - [The drive job](#the-drive-job)
@@ -43,16 +26,14 @@
   - [5. Comment result in pull request `actions/github-script@v6`](#5-comment-result-in-pull-request-actionsgithub-scriptv6)
   - [Simulation results](#simulation-results)
-
-
 ## General

 The workflow defined in [`.github/workflows/build.yml`](../../.github/workflows/build.yml) creates an executable image which can later
 be submitted to the [CARLA leaderboard](https://leaderboard.carla.org) and pushes it to [GitHub Packages](ghcr.io).

-The image can then be pulled with `docker pull ghcr.io/ll7/paf22:latest` to get the latest version
-or `docker pull ghcr.io/ll7/paf22:` to get a specific version.
+The image can then be pulled with `docker pull ghcr.io/una-auxme/paf:latest` to get the latest version
+or `docker pull ghcr.io/una-auxme/paf:` to get a specific version.

 If the action is triggered by a pull request, the created image is then used to execute a test run in the leaderboard,
 using the devtest routes. The results of this simulation are then added as a comment to the pull request.

@@ -85,18 +66,6 @@ Example taken from [here](https://docs.github.com/en/actions/publishing-packages

 ### 4. Bump version and push tag ([`mathieudutour/github-tag-action`](https://github.com/mathieudutour/github-tag-action))

 If the current commit is on the `main` branch, this action bumps the version and pushes a new tag to the repo.
-Creates a new tag with a [semantic version](https://semver.org/) number for the release.
-The version number is determinated by the name of the commits in the release.
-
-This is possible since [conventional commits](https://www.conventionalcommits.org/) are enforced by comlipy as
-described [here](./02_linting.md).
-
-#### Example
-
-| Commit message                                          | Release type  | Previous version number | New version number |
-|--------------------------------------------------------|---------------|-------------------------|--------------------|
-| fix(#39): build failing due to incorrect configuration  | Patch Release | 0.0.1                   | 0.0.2              |
-| feat(#39): Add automatic build process                  | Minor Release | 0.0.1                   | 0.1.0              |

 Major releases can be done manually (e.g. `git tag v1.0.0`).
@@ -122,10 +91,10 @@ Same step as in the [build job](#1-checkout-repository--actionscheckoutv3-)

 ### 2. Run agent with docker-compose

-Runs the agent with the [`build/docker-compose.test.yml`](../../build/docker-compose.test.yml) that only contains the
+Runs the agent with the [`build/docker-compose.cicd.yaml`](../../build/docker-compose.cicd.yaml) that only contains the
 bare minimum components for test execution:

-- Carla Simulator (running in headless mode)
+- Carla Simulator
 - roscore
 - Agent container, run through the Carla [`leaderboard_evaluator`](https://github.com/carla-simulator/leaderboard/blob/leaderboard-2.0/leaderboard/leaderboard_evaluator.py).
diff --git a/doc/02_development/04_coding_style.md b/doc/development/coding_style.md
similarity index 99%
rename from doc/02_development/04_coding_style.md
rename to doc/development/coding_style.md
index 0455af0c..544c397f 100644
--- a/doc/02_development/04_coding_style.md
+++ b/doc/development/coding_style.md
@@ -4,32 +4,7 @@

 **Summary:** This page contains the coding rules we want to follow as a team to improve readability and reviewing of our code. This document is for reference only and should be consulted in case of uncertainty about the style guidelines. Based on PEP 8 ()

----
-
-## Author
-
-Josef Kircher
-
-## Date
-
-04.11.2022
-
-## Prerequisite
-
-VSCode Extensions:
-
-- autoDostring - Python Docstring Generator by Nils Werner
-
-To get the ReST format like in Pycharm:
-
-- Go to Extension setting and change it under `Auto Doctring:Docstring Format` to `sphinx-notypes`
-
----
-
-- [Coding style guidelines](#coding-style-guidelines)
-  - [Author](#author)
-  - [Date](#date)
-  - [Prerequisite](#prerequisite)
   - [Code lay-out](#code-lay-out)
   - [Indentation](#indentation)
   - [Tabs or Spaces?](#tabs-or-spaces)
@@ -69,8 +44,6 @@ To get the ReST format like in Pycharm:
   - [Footnotes](#footnotes)
   - [Copyright](#copyright)
   - [Source](#source)
-
----

 ## Code lay-out
diff --git a/doc/02_development/12_discord_webhook.md b/doc/development/discord_webhook.md
similarity index 65%
rename from doc/02_development/12_discord_webhook.md
rename to doc/development/discord_webhook.md
index f9b427f5..08de569c 100644
--- a/doc/02_development/12_discord_webhook.md
+++ b/doc/development/discord_webhook.md
@@ -1,9 +1,11 @@
 # Discord Webhook

-Author: Lennart Luttkus, 15.11.2023
+**Summary**: This page explains the webhook that posts updates of the repository to Discord.
+
+- [Discord Webhook](#discord-webhook)

 The discord bot has access to the `#gitupdates` text channel on our discord server.
 It is an Integration as a Webhook.
-Settings for this webhook can be found in the repository settings .
+Settings for this webhook can be found in the repository settings .
 The Webhook posts updates from the repository in the `#gitupdates` channel.
 A helpful tutorial can be found here:
diff --git a/doc/02_development/14_distributed_simulation.md b/doc/development/distributed_simulation.md
similarity index 60%
rename from doc/02_development/14_distributed_simulation.md
rename to doc/development/distributed_simulation.md
index 75a981d7..f09d1ecb 100644
--- a/doc/02_development/14_distributed_simulation.md
+++ b/doc/development/distributed_simulation.md
@@ -1,15 +1,20 @@
 # Distributed Simulation

-If you have not enough compute resources, start the `carla-simulator-server` on a remote machine and execute the agent on your local machine.
-As far as we know, you need more than **10 GB of VRAM** to run the server and the agent on the same machine.
-
-## Author
+**Summary:** This page documents the distributed execution of the Carla simulator and the agent.

-Julian Trommer and Lennart Luttkus
+- [Distributed Simulation](#distributed-simulation)
+  - [General](#general)
+  - [Remote Machine Setup](#remote-machine-setup)
+  - [Local Machine Setup](#local-machine-setup)
+    - [Ensure similarity between normal docker-compose and distributed docker-compose files](#ensure-similarity-between-normal-docker-compose-and-distributed-docker-compose-files)
+    - [Set the `<ip-address>` of the carla simulator in docker-compose distributed files](#set-the-ip-address-of-the-carla-simulator-in-docker-compose-distributed-files)
+    - [Start the agent on your local machine](#start-the-agent-on-your-local-machine)
+  - [How do you know that you do not have enough compute resources?](#how-do-you-know-that-you-do-not-have-enough-compute-resources)

-## Date
+## General

-2024-06-28
+If you do not have enough compute resources, start the `carla-simulator` on a remote machine and execute the agent on your local machine.
+As far as we know, you need more than **10 GB of VRAM** to run the server and the agent on the same machine.

 ## Remote Machine Setup

@@ -45,15 +50,12 @@ Typically, the ip address is the first one in the list.

 Replace the ip-address in the following files:

-- `docker-compose.distributed.yml`
-- `docker-compose.dev.distributed.yml`
+- `build/docker-compose.devroute-distributed.yaml`
+- `build/docker-compose.leaderboard-distributed.yaml`

 ### Start the agent on your local machine

-```bash
-b5 run_distributed
-b5 run_dev_distributed
-```
+Navigate to the files mentioned above in the VS Code Explorer and select `Compose Up` after right-clicking one of the files.

 ## How do you know that you do not have enough compute resources?
diff --git a/doc/development/documentation_requirements.md b/doc/development/documentation_requirements.md
new file mode 100644
index 00000000..163db979
--- /dev/null
+++ b/doc/development/documentation_requirements.md
@@ -0,0 +1,68 @@
+# Documentation Requirements
+
+**Summary:** This document provides the guidelines for the documentation.
+
+- [Documentation Requirements](#documentation-requirements)
+  - [Readability and Maintainability](#readability-and-maintainability)
+  - [Code Structure](#code-structure)
+  - [Efficiency and Performance](#efficiency-and-performance)
+  - [Error Handling](#error-handling)
+  - [Testing](#testing)
+  - [Security](#security)
+  - [Documentation](#documentation)
+  - [Version Control](#version-control)
+  - [Scalability](#scalability)
+  - [Consistency with Coding Standards](#consistency-with-coding-standards)
+
+## Readability and Maintainability
+
+- **Consistent Formatting:** Code should follow a consistent and readable formatting style. Tools like linters or formatters can help enforce a consistent code style.
+  - [linting](./linting.md)
+  - [coding_style](./coding_style.md)
+- **Meaningful Names:** Variable and function names should be descriptive and convey the purpose of the code.
+- **Comments:** Clear and concise comments should be used where necessary to explain complex logic or provide context.
+
+## Code Structure
+
+- **Modularity:** Code should be organized into modular components or functions, promoting reusability and maintainability.
+- **Appropriate Use of Functions/Methods:** Functions should have a clear purpose and adhere to the single responsibility principle.
+- **Hierarchy and Nesting:** Avoid overly nested structures; use appropriate levels of indentation to enhance readability.
+ +## Efficiency and Performance + +- **Optimized Algorithms:** Code should use efficient algorithms and data structures to achieve good performance. +- **Avoidance of Code Smells:** Detect and eliminate code smells such as duplicated code, unnecessary complexity, or anti-patterns. + +## Error Handling + +- **Effective Error Messages:** Error messages should be clear and provide useful information for debugging. +- **Graceful Error Handling:** Code should handle errors gracefully, avoiding crashes and providing appropriate feedback. + +## Testing + +- **Comprehensive Test Coverage:** Code should be accompanied by a suite of tests that cover different scenarios, ensuring reliability and maintainability. +- **Test Readability:** Tests should be clear and easy to understand, serving as documentation for the codebase. + +## Security + +- **Input Validation:** Code should validate and sanitize inputs. + +## Documentation + +- **Code Comments:** In addition to in-code comments, consider external documentation for the overall project structure, APIs, and configurations. +- **README Files:** Include a well-written README file that provides an overview of the project, installation instructions, and usage examples. + +## Version Control + +- **Commit Messages:** Use descriptive and meaningful commit messages to track changes effectively. + - [commit](./commit.md) +- **Branching Strategy:** Follow a consistent and well-defined branching strategy to manage code changes. + +## Scalability + +- **Avoid Hardcoding:** Parameterize values that might change, making it easier to scale the application. +- **Optimized Resource Usage:** Ensure efficient utilization of resources to support scalability. + +## Consistency with Coding Standards + +- **Adherence to Coding Guidelines:** Follow established coding standards and best practices for the programming language or framework used. diff --git a/doc/02_development/11_dvc.md b/doc/development/dvc.md similarity index 94% rename from doc/02_development/11_dvc.md rename to doc/development/dvc.md index 61436128..abaa2ac3 100644 --- a/doc/02_development/11_dvc.md +++ b/doc/development/dvc.md @@ -4,22 +4,7 @@ **Summary:** This page describes what DVC is and how/where to use it. ---- - -## Author - -Tim Dreier - -## Date - -8.12.2022 - -## Table of contents - - [Data Version Control (DVC)](#data-version-control-dvc) - - [Author](#author) - - [Date](#date) - - [Table of contents](#table-of-contents) - [General](#general) - [Installation](#installation) - [Storage](#storage) @@ -37,7 +22,6 @@ Tim Dreier - [Commit an experiment](#commit-an-experiment) - [Dvclive](#dvclive) - [Example](#example) - ## General @@ -67,11 +51,11 @@ An administrator has to add your Google Account by doing the following. 1. Go to `https://drive.google.com` and login with our user 2. Click the folder `paf22`: -![paf22 folder](../00_assets/gdrive-paf.png) +![paf22 folder](../assets/gdrive-paf.png) 3. click on `Manage permissions` on the right side 4. Add the user as `Collaborator` -![paf22 folder](../00_assets/gdrive-permissions.png) +![paf22 folder](../assets/gdrive-permissions.png) ## Using DVC @@ -162,7 +146,7 @@ navigate among them and commit only the ones that we need to Git." [(Source)](ht Detailed documentation with a [good example](https://github.com/iterative/example-dvc-experiments) can be found [here](https://dvc.org/doc/start/experiment-management/experiments). -A working experiment in this project can be found [here](../../code/perception/src/traffic_light_detection/Readme.md). 
+A working experiment in this project can be found [here](../../code/perception/src/traffic_light_detection/README.md).

 #### Setup a new experiment

@@ -241,15 +225,15 @@ Storing a model file can be done the same way.

 > The commands below are not meant to be executed, since the example is already added in git.
 > It should give a brief overview of how DVC works.
-> However, the process is adaptable for any file or folder if you replace `doc/04_examples/dvc_example/dataset` with your path.
+> However, the process is adaptable for any file or folder if you replace `doc/examples/dvc_example/dataset` with your path.

-1. Add the folder `doc/04_examples/dvc_example/dataset` to DVC
+1. Add the folder `doc/examples/dvc_example/dataset` to DVC

    ```shell
-   dvc add doc/04_examples/dvc_example/dataset
+   dvc add doc/examples/dvc_example/dataset
    ```

-   > ❗️ if you already added the directory to git you have to remove it by running `git rm -r --cached 'doc/04_examples/dvc_example/dataset'`
+   > ❗️ If you already added the directory to git, you have to remove it by running `git rm -r --cached 'doc/examples/dvc_example/dataset'`

 2. Commit your changes in git
diff --git a/doc/02_development/05_git_workflow.md b/doc/development/git_workflow.md
similarity index 78%
rename from doc/02_development/05_git_workflow.md
rename to doc/development/git_workflow.md
index 7e721190..dcd9f0be 100644
--- a/doc/02_development/05_git_workflow.md
+++ b/doc/development/git_workflow.md
@@ -4,40 +4,25 @@

 **Summary:** This page gives an overview of different types of git workflows to choose from.

----
-
-## Author
-
-Josef Kircher
-
-## Date
-
-07.11.2022
-
-## Prerequisite
-
----
-
-- [Git Style](#git-style)
-  - [Author](#author)
-  - [Date](#date)
-  - [Prerequisite](#prerequisite)
   - [Git workflow](#git-workflow)
   - [Git Feature Branch](#git-feature-branch)
   - [Branch strategy](#branch-strategy)
   - [Git style](#git-style-1)
   - [Branch naming](#branch-naming)
   - [For example](#for-example)
-  - [Commit messages](#commit-messages)
+  - [Branch naming workflow](#branch-naming-workflow)
+    - [Branch Creation Settings](#branch-creation-settings)
+    - [Creating a Branch in the Web Interface](#creating-a-branch-in-the-web-interface)
+    - [Creating a Branch in VSCode](#creating-a-branch-in-vscode)
   - [Git commands cheat sheet](#git-commands-cheat-sheet)
   - [Sources](#sources)
-

 ## Git workflow

 ### Git Feature Branch

-![Git Feature](../00_assets/git-flow.svg)
+![Git Feature](../assets/git-flow.svg)

 #### Branch strategy

@@ -53,7 +38,7 @@ Two types of branches:

 ### Branch naming

 ---
 Feature branch: issue number-description-of-issue (separator: '-') generated by Github automatically

 #### For example

@@ -74,9 +59,9 @@ The `.vscode/settings.json` file in this repository contains settings that autom

 #### Creating a Branch in the Web Interface

-To create a branch in the web interface, follow these steps:
+To create a branch in the web interface, navigate to the corresponding issue and select the `Create Branch` option:

-![Create Branch](../00_assets/github_create_a_branch.png)
+![Create Branch](../assets/github_create_a_branch.png)

 #### Creating a Branch in VSCode

 In Visual Studio Code, use the "GitHub.vscode-pull-request-github" extension.

 2. These queries allow you to access different issues.
 3. Click the button "->" to create a new branch from the selected issue, check out the branch, and assign the issue to yourself.
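If you prefer the terminal, a branch following the naming scheme above can also be created manually; the issue number and description are made up for this sketch:

```shell
# create and check out a feature branch named after the issue it belongs to
git checkout -b 123-detect-stop-lines
```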
-### Commit messages
-
----
-
-- proceed to [Commit Messages](./03_commit.md)
-
 ### Git commands cheat sheet

 ---
diff --git a/doc/02_development/installing_cuda.md b/doc/development/installing_cuda.md
similarity index 76%
rename from doc/02_development/installing_cuda.md
rename to doc/development/installing_cuda.md
index 0daa0002..622c5fad 100644
--- a/doc/02_development/installing_cuda.md
+++ b/doc/development/installing_cuda.md
@@ -4,20 +4,14 @@

 **Summary:** This page gives a short overview of how to install cuda-toolkit on your computer.

----
-
-## Author
-
-Marco Riedenauer
-
-## Date
-
-10.01.2023
+- [Install cuda-toolkit](#install-cuda-toolkit)
+  - [First install](#first-install)
+  - [Common Problems](#common-problems)
+    - [Wrong version of cuda-toolkit installed](#wrong-version-of-cuda-toolkit-installed)

 ## First install

 For execution of the program, cuda-toolkit v11.7 has to be installed on both your computer and the docker container.
-Cuda-toolkit should already be installed on the docker container by executing ```b5 install``` in your build folder:

 For installing cuda-toolkit on your computer, execute step-by-step the following commands in your command line:

@@ -38,7 +32,7 @@ export LD_LIBRARY_PATH="/usr/local/cuda-x.y/lib64:$LD_LIBRARY_PATH"

 The path may be different depending on the system. You can get the path by executing ```which nvcc``` in the console.
 You can find your installed version of cuda-toolkit by executing ```nvcc --version```. The output should look like this:

-![Implementation](../00_assets/nvcc_version.png)
+![Implementation](../assets/nvcc_version.png)

 `release x.y` in the fourth column represents the version of the installed cuda-toolkit.

@@ -51,8 +45,3 @@ the installer outputs that already another version of cuda-toolkit is installed,
 you have to uninstall the old version first.
 This can be done by executing the file `cuda-uninstaller` in the installation folder, usually
 `/usr/local/cuda-x.y/bin`.
-
-### Executing b5 install/update leads to an error of incompatible nvcc and drivers
-
-I had this problem after reinstalling cuda-toolkit on my computer. The best workaround I found is to uninstall all
-NVIDIA drivers and cuda-toolkit and reinstall both of them.
diff --git a/doc/02_development/10_installing_python_packages.md b/doc/development/installing_python_packages.md
similarity index 56%
rename from doc/02_development/10_installing_python_packages.md
rename to doc/development/installing_python_packages.md
index 445cfe9c..fc908d9a 100644
--- a/doc/02_development/10_installing_python_packages.md
+++ b/doc/development/installing_python_packages.md
@@ -4,15 +4,8 @@

 **Summary:** This page gives a short overview of how to add python packages to the project.

----
-
-## Author
-
-Tim Dreier
-
-## Date
-
-7.12.2022
+- [Install python packages](#install-python-packages)
+  - [Adding packages with pip](#adding-packages-with-pip)

 ## Adding packages with pip

@@ -28,15 +21,3 @@ An example how this file could look like is given below:

 torch==1.13.0
 torchvision==0.1.9
 ```
-
-To install the added packages run `b5 install` afterwards.
-
-## Common Problems
-
-Sometimes, PyCharm does not recognize installed packages on the docker container.
-This leads to the problem that the program cannot be started in PyCharm via the run button, but only via command line.
-
-A workaround for this problem is:
-
-1. Run ```docker compose build``` in the console in the build folder.
-2. Click on the python interpreter in the lower right corner and reselect it.
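Since the `b5 install` step was removed, rebuilding the agent image is now the way to pick up newly added packages. A minimal sketch, assuming the leaderboard compose file and the `agent` service name from the build setup:

```shell
# rebuild the agent image so the container picks up the new requirements
docker compose -f build/docker-compose.leaderboard.yaml build agent
```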
diff --git a/doc/02_development/09_linter_action.md b/doc/development/linter_action.md
similarity index 85%
rename from doc/02_development/09_linter_action.md
rename to doc/development/linter_action.md
index d041ba00..3cc5da67 100644
--- a/doc/02_development/09_linter_action.md
+++ b/doc/development/linter_action.md
@@ -4,28 +4,12 @@

 **Summary:** This page explains the GitHub lint action we use to ensure code quality.

----
-
-## Author
-
-Tim Dreier
-
-## Date
-
-25.11.2022
-
-## Table of contents
-
-- [Github actions](#github-actions)
-  - [Author](#author)
-  - [Date](#date)
-  - [Table of contents](#table-of-contents)
   - [General](#general)
   - [Pull requests](#pull-requests)
   - [🚨 Common Problems](#-common-problems)
   - [1. Error in the markdown linter](#1-error-in-the-markdown-linter)
   - [2. Error in the python linter](#2-error-in-the-python-linter)
-

 ## General

@@ -39,9 +23,9 @@ This is done by limiting the execution of the action by the following line:

 on: pull_request
 ```

-The actions uses the same linters described in the section [Linting](./02_linting.md).
+The action uses the same linters described in the section [Linting](./linting.md).

-Event though the linters are already executed during commit,
+Even though the linters are already active during development,
 the execution on pull request ensures that nobody skips the linter during commit.

 ## Pull requests

@@ -59,7 +43,7 @@ To enforce this behaviour, we set the action as requirement as described in the
 >
 > [(Source)](https://stackoverflow.com/questions/60776412/github-actions-is-there-a-way-to-make-it-mandatory-for-pull-request-to-merge)

-More information about creating and merging pull requests can be found [here](./08_project_management.md).
+More information about creating and merging pull requests can be found [here](./project_management.md).

 ## 🚨 Common Problems

@@ -68,14 +52,14 @@ More information about creating and merging pull requests can be found [here](./

 If there are errors in any file which need to be fixed, the output of the action will look similar to this:

-![markdown lint error](../00_assets/github-action-md.png)
+![markdown lint error](../assets/github-action-md.png)

 ### 2. Error in the python linter

 If there are errors in any python file, the output of the action will look similar to this:

-![python lint error](../00_assets/github-action-py.png)
+![python lint error](../assets/github-action-py.png)

 This step even runs if the markdown linter has already failed.
 This way, all errors of different steps are directly visible
diff --git a/doc/02_development/02_linting.md b/doc/development/linting.md
similarity index 70%
rename from doc/02_development/02_linting.md
rename to doc/development/linting.md
index ab42b32f..16d887e8 100644
--- a/doc/02_development/02_linting.md
+++ b/doc/development/linting.md
@@ -2,7 +2,12 @@

 (Kept from previous group [paf22])

-To ensure unified standards in the project, the following linters are applied during commit.
+**Summary:** To ensure unified standards in the project, the following linters are applied during commit.
+
+- [Linting](#linting)
+  - [🐍 Python conventions](#-python-conventions)
+  - [💬 Markdown Linter](#-markdown-linter)
+  - [🚨 Common Problems](#-common-problems)

 ## 🐍 Python conventions

@@ -18,8 +23,6 @@ To enforce unified standards in all python files, we use [Flake8](https://pypi.o

 To enforce unified standards in all markdown files, we use [markdownlint-cli](https://github.com/igorshubovych/markdownlint-cli).
 More details on it can be found in the corresponding documentation.
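The linters can also be run locally before pushing, using the linter compose file; the service names below are taken from the build documentation and may need adjusting:

```shell
# run the python and markdown linters over the mounted project directory
docker compose -f build/docker-compose.linter.yaml up flake8 mdlint
```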
-The markdown linter can fix some errors on its own by executing `b5 markdown:fix`.
-
 ## 🚨 Common Problems

 Currently, we are not aware of any problems.
diff --git a/doc/02_development/08_project_management.md b/doc/development/project_management.md
similarity index 82%
rename from doc/02_development/08_project_management.md
rename to doc/development/project_management.md
index c82c2035..6ac08813 100644
--- a/doc/02_development/08_project_management.md
+++ b/doc/development/project_management.md
@@ -5,25 +5,7 @@

 **Summary:** We use a [Github Project](https://github.com/users/ll7/projects/2) for project management.
 Any bugs or feature requests are managed in GitHub.

----
-
-## Author
-
-- Tim Dreier
-- Josef Kircher
-
-## Date
-
-23.11.2022
-
-## Prerequisite
-
----
-
-- [Project management](#project-management)
-  - [Author](#author)
-  - [Date](#date)
-  - [Prerequisite](#prerequisite)
   - [Create bug or feature requests](#create-bug-or-feature-requests)
   - [🐞 Bug](#-bug)
   - [Example for "Bug"](#example-for-bug)
@@ -34,19 +16,16 @@ Any bugs or features requests are managed in Github.
   - [Create a Pull Request](#create-a-pull-request)
   - [Merging a Pull Request](#merging-a-pull-request)
   - [Deadlines for pull requests and reviews](#deadlines-for-pull-requests-and-reviews)
-
-
----

 ## Create bug or feature requests

 Bugs or features can be added [here](https://github.com/ll7/paf22/issues/new/choose) or via the [issue overview](https://github.com/ll7/paf22/issues).

-![create issue](../00_assets/create_issue.png)
+![create issue](../assets/create_issue.png)

 By clicking "New issue" in the overview or using the direct link above, a wizard guides you to the creation of an issue:

-![issue wizard](../00_assets/issue_wizard.png)
+![issue wizard](../assets/issue_wizard.png)

 The possibilities are described in the following sections.

@@ -60,7 +39,7 @@ If something is not expected to work, but you want to have it, please refer to t

 The documentation says that the vehicle should detect about 90% of the traffic lights.
 However, for you it ignores almost all traffic lights.

-![bug template](../00_assets/bug_template.png)
+![bug template](../assets/bug_template.png)

 ### 💡 Feature

@@ -71,7 +50,7 @@ Use this template if you want a new Feature which is not implemented yet.

 Currently, the vehicle can't make u-turns. Implementing the ability to perform u-turns would be a new feature.

-![feature template](../00_assets/feature_template.png)
+![feature template](../assets/feature_template.png)

 ### 🚗 Bug in CARLA Simulator

@@ -84,17 +63,11 @@ CARLA simulator crashes on startup on your machine.

 ## Create a Pull Request

 To create a pull request, go to the [branches overview](https://github.com/ll7/paf22/branches) and select ``New Pull Request`` for the branch you want to create a PR for.

-![img.png](../00_assets/branch_overview.png)
-
-Alternatively you can create a PR directly from PyCharm using the ``Pull Request`` tab on the sidebar.
-
-![img.png](../00_assets/Pycharm_PR.png)
-
-For completing the pull request, fill out the template that opens up automatically.
+![img.png](../assets/branch_overview.png)

 Merge the pull request after the review process is complete and all the feedback from the reviewer has been incorporated.

 For more information about the review process, see [Review process](./review_guideline.md).
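As an alternative to the web interface, GitHub's `gh` CLI (not part of this repository's documented tooling) can open the pull request from the current branch:

```shell
# create a pull request for the current branch, deriving title and body from the commits
gh pr create --fill
```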
## Merging a Pull Request
diff --git a/doc/02_development/07_review_guideline.md b/doc/development/review_guideline.md
similarity index 87%
rename from doc/02_development/07_review_guideline.md
rename to doc/development/review_guideline.md
index e42c4a17..005a8650 100644
--- a/doc/02_development/07_review_guideline.md
+++ b/doc/development/review_guideline.md
@@ -4,52 +4,33 @@

 **Summary:** This page gives an overview of the steps that should be taken during a review and how to give a helpful and constructive review

----
-
-## Author
-
-Josef Kircher
-
-## Date
-
-17.11.2022
-
-## Prerequisite
-
----
-
-- [Review Guidelines](#review-guidelines)
-  - [Author](#author)
-  - [Date](#date)
-  - [Prerequisite](#prerequisite)
   - [How to review](#how-to-review)
   - [How to comment on a pull request](#how-to-comment-on-a-pull-request)
+  - [CodeRabbit](#coderabbit)
   - [Incorporating feedback](#incorporating-feedback)
   - [Responding to comments](#responding-to-comments)
   - [Applying suggested changes](#applying-suggested-changes)
   - [Re-requesting a review](#re-requesting-a-review)
   - [Resolving conversations](#resolving-conversations)
   - [Sources](#sources)
-
-
----

 ## How to review

-1. Select der PR you want to review on GitHub
-![img.png](../00_assets/PR_overview.png)
+1. Select the PR you want to review on GitHub
+![img.png](../assets/PR_overview.png)
 2. Go to Files Changed
-![img.png](../00_assets/Files_Changed.png)
+![img.png](../assets/Files_Changed.png)
 3. Hover over the line where you want to add a comment and click on the blue `+` at the beginning of the line to add a comment
-![img.png](../00_assets/Comment_PR.png)
+![img.png](../assets/Comment_PR.png)
 4. If you want to comment on multiple lines, click and drag over these lines
 5. In the comment field, type your comment. How to write a good comment is handled in the next section.
 6. You can also add a suggestion by using ``Ctrl+G`` or the small paper icon in the header line of the comment
-![img.png](../00_assets/Suggestion.png)
+![img.png](../assets/Suggestion.png)
 7. If you are finished with the file, you can check ``Viewed`` in the top right corner and the file collapses
-![img.png](../00_assets/Comment_viewed.png)
+![img.png](../assets/Comment_viewed.png)
 8. To finish your review, click ``Review Changes``
-![img.png](../00_assets/Review_changes.png)
+![img.png](../assets/Review_changes.png)
 9. Type a comment summarizing your review
 10. Select the type of review you would like to leave:
 11. Comment - General feedback without approval
@@ -71,6 +52,10 @@ Josef Kircher
 - Be aware of negative bias with online communication. (If content is neutral, we assume the tone is negative.) Can you use positive language as opposed to neutral?
 - Use emoji to clarify tone. Compare “✨ ✨ Looks good 👍 ✨ ✨” to “Looks good.”

+## CodeRabbit
+
+The repository also comes with CodeRabbit integration. This tool generates automatic reviews for a pull request. Although the proposed changes do not have to be incorporated, they can point to a better solution for parts of the implementation.
+
 ## Incorporating feedback

 ### Responding to comments

@@ -90,8 +75,8 @@ If the reviewer not only left comments but also made specific suggestions on cod

 2. Navigate to the first suggested change
 3. If you want to commit that change in a single commit, click ``Commit suggestion``
 4. If you want to put more changes together to a single commit, click ``Add suggestion to batch``
-![img.png](../00_assets/Commit_suggestion.png)
-5. In the commit message field, type a short and meaningful commit message according to the [commit rules](./03_commit.md)
+![img.png](../assets/Commit_suggestion.png)
+5. In the commit message field, type a short and meaningful commit message according to the [commit rules](./commit.md)
 6. Click ``Commit changes``

 ### Re-requesting a review

@@ -102,7 +87,7 @@ If you made substantial changes to your pull request and want to a fresh review

 If a comment of a review was resolved by either a new commit or a discussion between the reviewer and the team that
 created the pull request, the conversation can be marked as resolved by clicking ``Resolve conversation`` in the
 ``Conversation`` or ``Files Changed`` tab of the pull request on GitHub.
 If a new commit took place, it is encouraged to comment the commit SHA to have a connection between the comment and the resolving commit

-![img.png](../00_assets/Resolve_conversation.png)
+![img.png](../assets/Resolve_conversation.png)

 ---
diff --git a/doc/02_development/templates/template_class.py b/doc/development/templates/template_class.py
similarity index 67%
rename from doc/02_development/templates/template_class.py
rename to doc/development/templates/template_class.py
index 9db00607..76edcb37 100644
--- a/doc/02_development/templates/template_class.py
+++ b/doc/development/templates/template_class.py
@@ -16,18 +16,20 @@

 # imports are always on the top of a file
 # imports in separate lines
-import os
-import sys
+# import os
+# import sys

-# Use from x import y where x is the package prefix and y is the module name with no prefix.
-from matplotlib import pyplot
+# Use from x import y where x is the package prefix and
+# y is the module name with no prefix.
+# from matplotlib import pyplot

 # Use import y as z only when z is a standard abbreviation (e.g., np for numpy).
-import numpy as np
+# import numpy as np

 # two blank lines between top level functions and class definition

+
 #############################
 # 3. Class-Definition       #
 #############################
@@ -44,7 +46,8 @@ def __init__(self):
         self.x = 0.0
         # one leading underscore for non-public instance and method names
         self._name = "Max"
-        # use a trailing underscore to avoid collision of attribute names with reserved keywords
+        # use a trailing underscore to avoid collision of attribute names
+        # with reserved keywords
         self.if_ = False

     # function names should be lower case with underscores to improve readability
@@ -71,29 +74,16 @@ def test_function3(self): # inline comment
     # 6. Docstrings             #
     #############################
     def test_function4(self, param1, param2):
-        # This docstring style is supported by Sphinx and helps with automated API documentation creation, automatically created by PyCharm
-        """
-        This is the description of the function.
+        # This docstring style is the default google style of the autoDocstring
+        # extension and helps with automated API documentation creation
+        """This is the description of the function.
-        :param param1: first parameter
-        :param param2: second parameter
-        :return: return value(s)
+        Args:
+            param1 (_type_): _description_
+            param2 (_type_): _description_
         """
         pass

-    def test_function5(self, param1, param2):
-        # This docstring style is supported by Sphinx and helps with automated API documentation creation, automatically created by VSCode extension autoDocstring
-        # VSCode Extentsion: autoDocstring - Python Docstring Generator by Nils Werner
-        # To get the ReST format like in Pycharm
-        # Go to Extension setting and change it under `Auto Doctring:Docstring Format` to `sphinx-notypes`
-        """_summary_
-
-        :param param1: _description_
-        :param param2: _description_
-        :return: _description_
-        """
-        return param1
-
     # main function of the class
     def main(self):
         print("Hello World")
diff --git a/doc/02_development/templates/template_class_no_comments.py b/doc/development/templates/template_class_no_comments.py
similarity index 85%
rename from doc/02_development/templates/template_class_no_comments.py
rename to doc/development/templates/template_class_no_comments.py
index 13640a5b..9d57c511 100644
--- a/doc/02_development/templates/template_class_no_comments.py
+++ b/doc/development/templates/template_class_no_comments.py
@@ -1,9 +1,9 @@
 #!/usr/bin/env python

-import os
-import sys
-from matplotlib import pyplot
-import numpy as np
+# import os
+# import sys
+# from matplotlib import pyplot
+# import numpy as np


 class TestClass:
@@ -20,12 +20,12 @@ def test_function1(self, param1):

     def test_function2(cls):
         """
-
+        :return:
         """
         pass

-    def test_function3(self): # inline comment
+    def test_function3(self):  # inline comment
         # This is a block comment
         # It goes over multiple lines
         # All comments start with a blank space
@@ -51,10 +51,10 @@ def test_function5(self, param1, param2):
         return param1

     def main(self):
-        """_summary_
-        """
+        """_summary_"""
         print("Hello World")

+
 if __name__ == "__main__":
     runner = TestClass()
     runner.main()
diff --git a/doc/development/templates/template_wiki_page.md b/doc/development/templates/template_wiki_page.md
new file mode 100644
index 00000000..95e95bff
--- /dev/null
+++ b/doc/development/templates/template_wiki_page.md
@@ -0,0 +1,24 @@
+# Title of wiki page
+
+**Summary:** This page serves as a template for how to build knowledge articles that everyone can understand. The basic structure should be kept for all articles. This template further contains a cheat sheet with the most useful markdown syntax.
+
+- [Title of wiki page](#title-of-wiki-page)
+  - [Generate Table of Contents](#generate-table-of-contents)
+  - [Some Content](#some-content)
+  - [more Content](#more-content)
+  - [Sources](#sources)
+
+## Generate Table of Contents
+
+How to generate a TOC in VS Code:
+
+1. ``Ctrl+Shift+P``
+2. Command "Create Table of Contents"
+
+## Some Content
+
+## more Content
+
+## Sources
+
diff --git a/doc/04_examples/dvc_example/.gitignore b/doc/examples/dvc_example/.gitignore
similarity index 100%
rename from doc/04_examples/dvc_example/.gitignore
rename to doc/examples/dvc_example/.gitignore
diff --git a/doc/04_examples/dvc_example/dataset.dvc b/doc/examples/dvc_example/dataset.dvc
similarity index 100%
rename from doc/04_examples/dvc_example/dataset.dvc
rename to doc/examples/dvc_example/dataset.dvc
diff --git a/doc/04_examples/gps_example/gps_signal_example.md b/doc/examples/gps_example/gps_signal_example.md
similarity index 84%
rename from doc/04_examples/gps_example/gps_signal_example.md
rename to doc/examples/gps_example/gps_signal_example.md
index 5314652c..2fb92639 100644
--- a/doc/04_examples/gps_example/gps_signal_example.md
+++ b/doc/examples/gps_example/gps_signal_example.md
@@ -2,29 +2,14 @@

 **Summary:** This page explains how the GPS sensor is handled, including a short example of how to use it.

-**The Filter that's currently in use: [Kalman Filter](../../06_perception/08_kalman_filter.md)**
+**The Filter that's currently in use: [Kalman Filter](../../perception/kalman_filter.md)**

----
-
-## Authors
-
-Gabriel Schwald
-
-### Date
-
-07.01.2023
-
----
-
-- [GPS sensor](#gps-sensor)
-  - [Authors](#authors)
-  - [Date](#date)
   - [Raw sensor data](#raw-sensor-data)
   - [Filters for the sensor data](#filters-for-the-sensor-data)
   - [Intuitive filter](#intuitive-filter)
   - [Rolling average](#rolling-average)
   - [Kalman Filter](#kalman-filter)
-

 ## Raw sensor data

@@ -34,7 +19,7 @@ While latitude and longitude are measured in degrees, altitude is measured in me

 ## Filters for the sensor data

 As with all sensors provided by Carla, the GPS sensor output contains artificial noise.
-![Unfiltered GPS signal](../../00_assets/filter_img/avg_1_w_1_000.png)
+![Unfiltered GPS signal](../../assets/filter_img/avg_1_w_1_000.png)
 Right now there are multiple types of filters implemented.

 ### Intuitive filter

@@ -49,11 +34,11 @@ parameters.

 The following graphs were taken while the car was stationary, the time on the bottom is therefore irrelevant.
 Shown is the position translated to a local coordinate system, the transformation will be discussed later.

-![GPS signal (m=1, w=0,5)](../../00_assets/filter_img/avg_1_w_0_500.png)
+![GPS signal (m=1, w=0,5)](../../assets/filter_img/avg_1_w_0_500.png)

 Using $w = 0.5$ clearly reduces the magnitude of the noise, however such a small value reduces the responsiveness of the
 output signal.

-![GPS signal (m=1, w=0,5)](../../00_assets/filter_img/avg_10_w_1_000.png)
+![GPS signal (m=10, w=1)](../../assets/filter_img/avg_10_w_1_000.png)

 Using a large number of data points ( $m = 10$ ) also reduces the magnitude of the noise.
 The main drawback here is the reduced frequency of the output signal,
 as the frequency of the output signal is $\frac{1}{m}$ that of the input signal.
@@ -61,7 +46,7 @@ This can be avoided through the use of a rolling average where for every output

 the last $m$ inputs are taken into account.
 Combining these two parameters can further improve the result.

-![GPS signal (m=1, w=0,5)](../../00_assets/filter_img/avg_20_w_0_750.png)
+![GPS signal (m=20, w=0,75)](../../assets/filter_img/avg_20_w_0_750.png)

 The output signal's frequency has now been reduced to 1Hz compared to the original 20Hz frequency,
 with the weight now being set to $w = 0.75$

@@ -76,7 +61,7 @@ whenever a new signal is received.
 Once new data is received, the matrix is rotated by one position and the oldest measurement is overwritten.
 The output is equal to the average of all $n$ vectors.

-![Rolling average filter (n=20)](../../00_assets/filter_img/rolling_avg_20.png)
+![Rolling average filter (n=20)](../../assets/filter_img/rolling_avg_20.png)

 More arguments smooth out the gps signal, however they also add sluggishness to the output.
 The number of arguments taken into account can be adjusted using the
@@ -84,21 +69,21 @@ The number of arguments taken into account can be adjusted using the

 This was the method ultimately chosen with $n=10$, leading to the following gps signal.

-![Final gps signal (n=10)](../../00_assets/filter_img/rolling_avg_10.png)
+![Final gps signal (n=10)](../../assets/filter_img/rolling_avg_10.png)

 ### Kalman Filter

-A little more complex, but quicker reacting filter is the [Kalman Filter](../../06_perception/08_kalman_filter.md).
+A little more complex but quicker-reacting filter is the [Kalman Filter](../../perception/kalman_filter.md).
 It is heavily dependent on which system model you use and how you tune its parameters.
 When done correctly, it reduces the GPS noise greatly without adding any delay to the output as the filters above do.

-![MAE Boxed Graph of Location Error with respect to ideal Location](../../../doc/00_assets/perception/data_26_MAE_Boxed.png)
+![MAE Boxed Graph of Location Error with respect to ideal Location](../../../doc/assets/perception/data_26_MAE_Boxed.png)

 In the upper graph a smaller box indicates less noise. Also, the lower the values are, the less deviation from the ideal position we have.

 This is the graph that was used for tuning the Kalman parameters:

-![MSE Boxed Graph of Location Error with respect to ideal Location](../../../doc/00_assets/perception/data_26_MSE_Boxed.png)
+![MSE Boxed Graph of Location Error with respect to ideal Location](../../../doc/assets/perception/data_26_MSE_Boxed.png)

 It depicts the MSE (mean squared error) for the error distance to the ideal position.
 As you can see, the filtered positions are still noisy, but much closer to the ideal position.
 In comparison, the running average filter is not as noisy, but constantly wrong by about 1 meter, because it is time delayed.
diff --git a/doc/general/README.md b/doc/general/README.md
new file mode 100644
index 00000000..cbf937f4
--- /dev/null
+++ b/doc/general/README.md
@@ -0,0 +1,7 @@
+# General project setup
+
+This folder contains instructions on the installation, execution and architecture of the agent.
+
+1. [Installation](./installation.md)
+2. [Execution](./execution.md)
+3. [Current architecture of the agent](./architecture.md)
diff --git a/doc/01_general/04_architecture.md b/doc/general/architecture.md
similarity index 92%
rename from doc/01_general/04_architecture.md
rename to doc/general/architecture.md
index c1a9de98..72eacdce 100644
--- a/doc/01_general/04_architecture.md
+++ b/doc/general/architecture.md
@@ -3,21 +3,7 @@

 **Summary:** This page gives an overview of the planned general architecture of the vehicle agent.
 The document contains an overview of all [nodes](#overview) and [topics](#topics).
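Once the stack is up, the nodes and topics described on this page can be inspected from inside the agent container; the compose file and service name below follow the build setup, but the exact invocation is a sketch:

```shell
# list all active ROS topics of the running agent
docker compose -f build/docker-compose.leaderboard.yaml exec agent rostopic list
```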
----
-
-## Authors
-
-Julius Miller, Alexander Hellmann, Samuel Kühnel
-
-## Date
-
-29.03.2024
-
----
-
-- [Planned architecture of vehicle agent](#planned-architecture-of-vehicle-agent)
-  - [Authors](#authors)
-  - [Date](#date)
   - [Overview](#overview)
   - [Perception](#perception)
   - [Obstacle Detection and Classification](#obstacle-detection-and-classification)
@@ -35,7 +21,6 @@ Julius Miller, Alexander Hellmann, Samuel Kühnel
   - [Velocity control](#velocity-control)
   - [Vehicle controller](#vehicle-controller)
   - [Visualization](#visualization)
-

 ## Overview

@@ -47,7 +32,7 @@ found [here](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_sens

 The msgs necessary to control the vehicle via the Carla bridge can be found
 [here](https://carla.readthedocs.io/en/0.9.8/ros_msgs/#CarlaEgoVehicleControlmsg)

-![Architecture overview](../00_assets/overview.jpg)
+![Architecture overview](../assets/overview.jpg)

 The miro-board can be found [here](https://miro.com/welcomeonboard/a1F0d1dya2FneWNtbVk4cTBDU1NiN3RiZUIxdGhHNzJBdk5aS3N4VmdBM0R5c2Z1VXZIUUN4SkkwNHpuWlk2ZXwzNDU4NzY0NTMwNjYwNzAyODIzfDI=?share_link_id=785020837509).

 ## Perception

 The perception is responsible for the efficient conversion of raw sensor and map data into a useful environment
 representation that can be used by the [Planning](#Planning) for further processing.

-Further information regarding the perception can be found [here](../06_perception/Readme.md).
-Research for the perception can be found [here](../03_research/02_perception/Readme.md).
+Further information regarding the perception can be found [here](../perception/README.md).
+Research for the perception can be found [here](../research/perception/README.md).

 ### Obstacle Detection and Classification

@@ -120,10 +105,10 @@ The planning uses the data from the [Perception](#Perception) to find a path on

 its destination. It also detects situations and reacts accordingly in traffic. It publishes signals such as a trajectory or a target speed to acting.

-Further information regarding the planning can be found [here](../07_planning/README.md).
-Research for the planning can be found [here](../03_research/03_planning/Readme.md).
+Further information regarding the planning can be found [here](../planning/README.md).
+Research for the planning can be found [here](../research/planning/README.md).

-### [Global Planning](../07_planning/Global_Planner.md)
+### [Global Planning](../planning/Global_Planner.md)

 Uses information from the map and the path specified by CARLA to find a first concrete path to the next intermediate point.

@@ -138,7 +123,7 @@ Publishes:

 - ```provisional_path``` ([nav_msgs/Path Message](http://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Path.html))

-### [Decision Making](../07_planning/Behavior_tree.md)
+### [Decision Making](../planning/Behavior_tree.md)

 Decides which speed is the right one to pass through a certain situation and also checks if an overtake is necessary.
@@ -157,7 +142,7 @@ Publishes:

 - ```curr_behavior``` ([std_msgs/String](https://docs.ros.org/en/api/std_msgs/html/msg/String.html))

-### [Local Planning](../07_planning/Local_Planning.md)
+### [Local Planning](../planning/Local_Planning.md)

 It consists of three components:

@@ -165,7 +150,7 @@ It consists of three components:
 - ACC: Generates a new speed based on a possible collision received from Collision Check and speed limits received from [Global Planner](#global-planning)
 - Motion Planning: Decides the target speed and modifies the trajectory if a signal is received from [Decision Making](#decision-making)

-#### [Collision Check](../07_planning//Collision_Check.md)
+#### [Collision Check](../planning/Collision_Check.md)

 Subscriptions:

@@ -182,7 +167,7 @@ Publishes:

 - ```current_wp``` ([std_msgs/Float32](https://docs.ros.org/en/api/std_msgs/html/msg/Float32.html))
 - ```speed_limit``` ([std_msgs/Float32](https://docs.ros.org/en/api/std_msgs/html/msg/Float32.html))

-#### [ACC](../07_planning/ACC.md)
+#### [ACC](../planning/ACC.md)

 Subscriptions:

@@ -195,7 +180,7 @@ Publishes:

 - ```collision``` ([std_msgs/Float32MultiArray](https://docs.ros.org/en/api/std_msgs/html/msg/Float32MultiArray.html))
 - ```oncoming``` ([std_msgs/Float32](https://docs.ros.org/en/api/std_msgs/html/msg/Float32.html))

-#### [Motion Planning](../07_planning/motion_planning.md)
+#### [Motion Planning](../planning/motion_planning.md)

 Subscriptions:

@@ -225,9 +210,9 @@ Publishes:

 The job of this component is to take the planned trajectory and target-velocities from the [Planning](#Planning) component
 and convert them into steering and throttle/brake controls for the CARLA-vehicle.

-All information regarding research done about acting can be found [here](../03_research/01_acting/Readme.md).
+All information regarding research done about acting can be found [here](../research/acting/README.md).

-Indepth information about the currently implemented acting Components can be found [HERE](../05_acting/Readme.md)!
+In-depth information about the currently implemented acting components can be found [HERE](../acting/README.md)!
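While the agent is driving, the final control commands can be watched live; the `hero` role name and topic path follow the usual Carla ROS bridge layout and are an assumption here:

```shell
# print the control commands the acting component sends to the CARLA ros-bridge
rostopic echo /carla/hero/vehicle_control_cmd
```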
### Path following with Steering Controllers

 Publishes:

 - ```steering_angle``` for ```vehicle_control_cmd``` ([CarlaEgoVehicleControl.msg](https://carla.readthedocs.io/en/0.9.8/ros_msgs/#CarlaEgoVehicleControlmsg))

-For further indepth information about the currently implemented Steering Controllers click [HERE](../05_acting/03_steering_controllers.md)
+For further in-depth information about the currently implemented Steering Controllers, click [HERE](../acting/steering_controllers.md)

 ### Velocity control

@@ -263,7 +248,7 @@ Publishes:

 - ```reverse``` for ```vehicle_control_cmd``` ([CarlaEgoVehicleControl.msg](https://carla.readthedocs.io/en/0.9.8/ros_msgs/#CarlaEgoVehicleControlmsg))

-For further indepth information about the currently implemented Velocity Controller click [HERE](../05_acting/02_velocity_controller.md)
+For further in-depth information about the currently implemented Velocity Controller, click [HERE](../acting/velocity_controller.md)

 ### Vehicle controller

@@ -282,7 +267,7 @@ Publishes:

 - ```vehicle_control_cmd``` ([CarlaEgoVehicleControl.msg](https://carla.readthedocs.io/en/0.9.8/ros_msgs/#CarlaEgoVehicleControlmsg))

-For further indepth information about the currently implemented Vehicle Controller click [HERE](../05_acting/04_vehicle_controller.md)
+For further in-depth information about the currently implemented Vehicle Controller, click [HERE](../acting/vehicle_controller.md)

 ## Visualization
diff --git a/build/README.md b/doc/general/execution.md
similarity index 65%
rename from build/README.md
rename to doc/general/execution.md
index a3d8ff0d..8b8d498f 100644
--- a/build/README.md
+++ b/doc/general/execution.md
@@ -1,27 +1,36 @@
-# Build Directory Documentation
+# Execution

-This document provides an overview of the build structure of the project,
-detailing the purpose and usage of the various configuration files located in the `build` directory.
+This document provides an overview of how to execute the project,
+detailing the purpose and usage of the various configuration files located in the [build](../../build/) directory.
 The project utilizes Docker and Docker Compose to manage services and dependencies,
 facilitating both normal and distributed execution modes.
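For readers who prefer the terminal over the VS Code context menu, the same execution can be started with plain Docker Compose:

```shell
# start the default leaderboard setup; equivalent to "Compose Up" on the file in VS Code
docker compose -f build/docker-compose.leaderboard.yaml up
```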
## Table of Contents -- [Directory Structure](#directory-structure) -- [Base Service Files](#base-service-files) - - [`agent_service.yaml`](#agent_serviceyaml) - - [`carla-simulator_service.yaml`](#carla-simulator_serviceyaml) - - [`linter_services.yaml`](#linter_servicesyaml) - - [`roscore_service.yaml`](#roscore_serviceyaml) -- [Docker Compose Files](#docker-compose-files) - - [`docker-compose.yaml`](#docker-composeyaml) - - [`docker-compose_dev.yaml`](#docker-composedevyaml) - - [`docker-compose_cicd.yaml`](#docker-compose_cicdyaml) -- [Execution Modes](#execution-modes) - - [Normal Execution](#normal-execution) - - [Distributed Execution](#distributed-execution) -- [Usage](#usage) -- [Notes](#notes) +- [Execution](#execution) + - [Table of Contents](#table-of-contents) + - [Quick Start](#quick-start) + - [Directory Structure](#directory-structure) + - [Base Service Files](#base-service-files) + - [`agent_service.yaml`](#agent_serviceyaml) + - [`roscore_service.yaml`](#roscore_serviceyaml) + - [Docker Compose Files](#docker-compose-files) + - [`docker-compose.carla-simulator.yaml`](#docker-composecarla-simulatoryaml) + - [`docker-compose.linter.yaml`](#docker-composelinteryaml) + - [`docker-compose.leaderboard.yaml`](#docker-composeleaderboardyaml) + - [`docker-compose.devroute.yaml`](#docker-composedevrouteyaml) + - [`docker-compose.dev.yaml`](#docker-composedevyaml) + - [`docker-compose.cicd.yaml`](#docker-composecicdyaml) + - [Execution Modes](#execution-modes) + - [Normal Execution](#normal-execution) + - [Distributed Execution](#distributed-execution) + - [Usage](#usage) + - [Notes](#notes) + - [Conclusion](#conclusion) + +## Quick Start + +In order to start the default leaderboard execution simply navigate to the [build](../../build/) folder and select the `Compose up` option in the right-click menu of the `docker-compose.leaderboard.yaml` file. ## Directory Structure @@ -29,17 +38,18 @@ The `build` directory contains the necessary configuration and setup files for b - **Base Service Files** - `agent_service.yaml` - - `carla-simulator_service.yaml` - - `linter_services.yaml` - `roscore_service.yaml` - **Docker Compose Files** - - `docker-compose.yaml` - - `docker-compose_dev.yaml` - - `docker-compose_cicd.yaml` + - `docker-compose.carla-simulator.yaml` + - `docker-compose.linter.yaml` + - `docker-compose.leaderboard.yaml` + - `docker-compose.devroute.yaml` + - `docker-compose.dev.yaml` + - `docker-compose.cicd.yaml` ## Base Service Files -The base service files define the configurations for individual services used in the project. These files are included or extended in the Docker Compose files to create different execution setups. +The base service files define the configurations for individual services used in the project. These files are included or extended in the Docker Compose files to create different execution setups and are not intended for standalone execution. ### `agent_service.yaml` @@ -50,7 +60,20 @@ Defines the configuration for the `agent` service, which represents the autonomo - **Volumes**: Mounts directories like `/workspace` to share code and data between the host and the container. - **Networks**: Connects the agent to the `carla` and `ros` networks. -### `carla-simulator_service.yaml` +### `roscore_service.yaml` + +Defines the `roscore` service for running the ROS master node. Key configurations include: + +- **Image**: Uses the official ROS Noetic image. +- **Command**: Starts `roscore`. +- **Environment Variables**: Sets up ROS networking variables. 
+- **Networks**: Connects to the `ros` network. + +## Docker Compose Files + +The Docker Compose files allow the execution of different components or whole scenarios that include multiple services. + +### `docker-compose.carla-simulator.yaml` Defines the configuration for the `carla-simulator` service, which runs the CARLA simulator. Key configurations include: @@ -60,44 +83,30 @@ Defines the configuration for the `carla-simulator` service, which runs the CARL - **Volumes**: Shares the X11 UNIX socket and custom CARLA settings. - **Networks**: Connects to the `carla` network. -### `linter_services.yaml` +### `docker-compose.linter.yaml` Defines services for code linting and static analysis. Includes: - **flake8**: For Python linting. -- **comlipy**: Custom linting based on project requirements. - **mdlint**: For Markdown file linting. - **Volumes**: Mounts the project directory for linting files within the container. -### `roscore_service.yaml` - -Defines the `roscore` service for running the ROS master node. Key configurations include: - -- **Image**: Uses the official ROS Noetic image. -- **Command**: Starts `roscore`. -- **Environment Variables**: Sets up ROS networking variables. -- **Networks**: Connects to the `ros` network. - -## Docker Compose Files - -The Docker Compose files orchestrate multiple services defined in the base service files, allowing for different execution scenarios. - -### `docker-compose.yaml` +### `docker-compose.leaderboard.yaml` - **Includes**: - - `linter_services.yaml` + - `docker-compose.linter.yaml` + - `docker-compose.carla-simulator.yaml` - `roscore_service.yaml` - - `carla-simulator_service.yaml` - **Services**: - Extends the `agent` service from `agent_service.yaml`. - **Purpose**: Runs the agent with special scenarios included. Solving these scenarios is the primary goal of the project. -### `docker-compose_dev.yaml` +### `docker-compose.devroute.yaml` - **Includes**: - - `linter_services.yaml` + - `docker-compose.linter.yaml` + - `docker-compose.carla-simulator.yaml` - `roscore_service.yaml` - - `carla-simulator_service.yaml` - **Services**: - Extends the `agent` service from `agent_service.yaml`. - **Environment Overrides**: @@ -106,11 +115,17 @@ The Docker Compose files orchestrate multiple services defined in the base servi - Runs the agent with simplified settings suitable for development and testing. - **Purpose**: Provides a minimal setup for development without special scenarios. -### `docker-compose_cicd.yaml` +### `docker-compose.dev.yaml` + +- **Services**: + - Defines an `agent-dev` service using the corresponding Dockerfile. +- **Purpose**: Provides a container for attaching a VS Code instance for development. + +### `docker-compose.cicd.yaml` - **Includes**: + - `docker-compose.carla-simulator.yaml` - `roscore_service.yaml` - - `carla-simulator_service.yaml` - **Services**: - Defines an `agent` service using a prebuilt image from the project's container registry. - **Dependencies**: @@ -135,34 +150,22 @@ Distributed execution separates the agent and the CARLA simulator onto different - Running large vision models that require extensive VRAM. - The single machine's resources are insufficient to handle both the agent and simulator. -**Note**: In distributed execution, the CARLA simulator must be running on a second desktop PC, and the `CARLA_SIM_HOST` environment variable should be set accordingly. 
+**Note**: In distributed execution, the CARLA simulator must be running on a second desktop PC, and the `CARLA_SIM_HOST` environment variable should be set accordingly. Further information can be found [here](../doc/development/distributed_simulation.md). ## Usage -To run the project using the provided Docker Compose files: - -- **Standard Execution with Special Scenarios**: - - ```bash - docker-compose -f build/docker-compose.yaml up - ``` - -- **Development Execution without Special Scenarios**: - - ```bash - docker-compose -f build/docker-compose_dev.yaml up - ``` +To run the project using the provided Docker Compose files, simply navigate to the files in the VS Code Explorer and select `Compose Up` after right-clicking the file. - **CI/CD Execution**: - The `docker-compose_cicd.yaml` is intended to be used within CI/CD pipelines and may be invoked as part of automated scripts. + The `docker-compose.cicd.yaml` is intended to be used within CI/CD pipelines and may be invoked as part of automated scripts. ## Notes - Ensure that you have NVIDIA GPU support configured if running models that require GPU acceleration. - The `agent_service.yaml` and other base service files are crucial for defining the common configurations and should not be modified unless necessary. - When running in distributed mode, update the `CARLA_SIM_HOST` environment variable in the appropriate service configurations to point to the simulator's IP address. -- The linter services defined in `linter_services.yaml` can be used to maintain code quality and should be run regularly during development. +- The linter services defined in `docker-compose.linter.yaml` can be used to maintain code quality and should be run regularly during development. ## Conclusion diff --git a/doc/general/installation.md b/doc/general/installation.md new file mode 100644 index 00000000..f5bc39a3 --- /dev/null +++ b/doc/general/installation.md @@ -0,0 +1,117 @@ +# 🛠️ Installation + +**Summary:** This page explains the installation process for the project. + +- [🛠️ Installation](#️-installation) + - [Docker with NVIDIA GPU support](#docker-with-nvidia-gpu-support) + - [Docker](#docker) + - [Allow non-root user to execute Docker commands](#allow-non-root-user-to-execute-docker-commands) + - [NVIDIA Container toolkit](#nvidia-container-toolkit) + - [VS Code Extensions](#vs-code-extensions) + - [🚨 Common Problems](#-common-problems) + - [Vulkan device not available](#vulkan-device-not-available) + +To run the project, you have to install [docker](https://docs.docker.com/engine/install/) with NVIDIA GPU support, [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). + +For development, we recommend Visual Studio Code with the plugins that are recommended inside the `.vscode` folder. + +## Docker with NVIDIA GPU support + +If Docker is not yet installed, first install it as described in the [Docker](#docker) section below. + +For NVIDIA GPU support, it's easiest to follow the guide in the [NVIDIA docs](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). + +For simplicity, we list the necessary steps here: + +### Docker + +Install Docker using the convenience script. + +```shell +curl https://get.docker.com | sh \ + && sudo systemctl --now enable docker +``` + +### Allow non-root user to execute Docker commands + +We recommend this step so that you do not have to execute every command as root.
+ +```shell +# add docker group +sudo groupadd docker + +# add your current user to the docker group +sudo usermod -aG docker $USER +``` + +After this, _restart_ your system to propagate the group changes. + +### NVIDIA Container toolkit + +Set up the package repository and the GPG key: + +```shell +distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \ + && curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ + && curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \ + sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ + sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list +``` + +Install the `nvidia-docker2` package (and dependencies) after updating the package listing: + +```shell +sudo apt-get update +sudo apt-get install -y nvidia-docker2 +``` + +Restart the Docker daemon to complete the installation after setting the default runtime: + +```shell +sudo systemctl restart docker +``` + +## VS Code Extensions + +The repository comes with a suite of recommended VS Code extensions. Install them via the `Extensions` tab inside VS Code. + +## 🚨 Common Problems + +### Vulkan device not available + +Cannot find a compatible Vulkan Device. +Try updating your video driver to a more recent version and make sure your video card supports Vulkan. + +![Vulkan device not available](../assets/vulkan_device_not_available.png) + +Verify the issue with the following command: + +```shell +$ docker run --rm --runtime=nvidia --gpus all ubuntu nvidia-smi +Failed to initialize NVML: Unknown Error +``` + +> [!TIP] Solution found in + +```shell +sudo vim /etc/nvidia-container-runtime/config.toml +``` + ++Then change `no-cgroups = false` and save the file. + +Restart the Docker daemon: + +```shell +sudo systemctl restart docker +``` + +Then you can test by running + +```shell +docker run --rm --runtime=nvidia --gpus all ubuntu nvidia-smi +``` + +Based on: + +1. +2. diff --git a/doc/perception/README.md b/doc/perception/README.md new file mode 100644 index 00000000..9b747565 --- /dev/null +++ b/doc/perception/README.md @@ -0,0 +1,22 @@ +# Documentation of perception component + +This folder contains further documentation of the perception components. + +1. [Vision Node](./vision_node.md) + - The Vision Node provides an adaptive interface that is able to perform object-detection and/or image-segmentation on multiple cameras at the same time. +2. [Position Heading Filter Debug Node](./position_heading_filter_debug_node.md) +3. [Kalman Filter](./kalman_filter.md) +4. [Position Heading Publisher Node](./position_heading_publisher_node.md) +5. [Distance to Objects](./distance_to_objects.md) +6. [Traffic Light Detection](./traffic_light_detection.md) +7. [Coordinate Transformation (helper functions)](./coordinate_transformation.md) +8. [Dataset Generator](./dataset_generator.md) +9. [Dataset Structure](./dataset_structure.md) +10. [Lidar Distance Utility](./lidar_distance_utility.md) + 1. not used since paf22 +11. [Efficient PS](./efficientps.md) + 1. not used since paf22 and never successfully tested + +## Experiments + +- The overview of performance evaluations is located in the [experiments](./experiments/README.md) folder.
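As a quick illustration of the "multiple cameras at the same time" design mentioned for the Vision Node above: the pattern boils down to one subscriber per camera angle, all sharing a single detection routine. Below is a minimal sketch assuming hypothetical topic names and a stubbed `detect` helper; the real node reads its camera setup and model from the perception launch file.

```python
import rospy
from sensor_msgs.msg import Image

# Illustrative camera angles; the real set comes from the launch file.
CAMERA_ANGLES = ["Center", "Back", "Left", "Right"]


def detect(msg, angle):
    # Stand-in for the actual model inference (YOLO, Faster R-CNN, ...);
    # here the image is simply passed through unchanged.
    return msg


def make_callback(angle, publisher):
    # One shared detection routine, parameterized by camera angle.
    def callback(msg):
        publisher.publish(detect(msg, angle))
    return callback


if __name__ == "__main__":
    rospy.init_node("vision_node_sketch")
    for angle in CAMERA_ANGLES:
        pub = rospy.Publisher(f"/sketch/{angle}/detections", Image, queue_size=1)
        rospy.Subscriber(f"/sketch/{angle}/image", Image, make_callback(angle, pub))
    rospy.spin()
```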
diff --git a/doc/06_perception/00_coordinate_transformation.md b/doc/perception/coordinate_transformation.md similarity index 88% rename from doc/06_perception/00_coordinate_transformation.md rename to doc/perception/coordinate_transformation.md index 46eea5ca..f4803887 100644 --- a/doc/06_perception/00_coordinate_transformation.md +++ b/doc/perception/coordinate_transformation.md @@ -2,26 +2,10 @@ **Summary:** Used for various helper functions such as quat_to_heading, that are useful in a lot of cases. **It is not yet fully documented**. ---- - -## Author - -Robert Fischer - -## Date - -12.01.2024 - - - -- [Coordinate Transformation](#coordonate-transformation) - - [Author](#author) - - [Date](#date) +- [Coordinate Transformation](#coordinate-transformation) - [Usage](#usage) - [Methods](#methods) - - [quat_to_heading(quaternion)](#quat_to_headingquaternion) - - + - [quat\_to\_heading(quaternion)](#quat_to_headingquaternion) ## Usage @@ -95,7 +79,7 @@ $$ So we end up with a vector that's rotated into the x-y plane with the new x and y coordinates being `a` and `d`: -![quat_to_angle](../../doc/00_assets/perception/quat_to_angle.png) +![quat_to_angle](../../doc/assets/perception/quat_to_angle.png) Now all we need to do is calculate the angle $\theta$ around the z-axis which this vector creates between the x-axis and itself using the `atan` function: @@ -112,7 +96,7 @@ $$heading = \theta$$ def quat_to_heading(quaternion): """ Converts a quaternion to a heading of the car in radians - (see ../../doc/06_perception/00_coordinate_transformation.md) + (see ../../doc/perception/coordinate_transformation.md) :param quaternion: quaternion of the car as a list [q.x, q.y, q.z, q.w] where q is the quaternion :return: heading of the car in radians (float) diff --git a/doc/06_perception/01_dataset_generator.md b/doc/perception/dataset_generator.md similarity index 94% rename from doc/06_perception/01_dataset_generator.md rename to doc/perception/dataset_generator.md index ad688f7d..d250b24e 100644 --- a/doc/06_perception/01_dataset_generator.md +++ b/doc/perception/dataset_generator.md @@ -3,29 +3,13 @@ **Summary:** The dataset generator located in perception/src/dataset_generator.py is a standalone script, directly hooking into the Carla Python API. It is used to generate a dataset to train perception models. ---- - -## Author - -Korbinian Stein - -## Date - -24.01.2023 - - - - [Dataset generator](#dataset-generator) - - [Author](#author) - - [Date](#date) - [Necessary adjustments](#necessary-adjustments) - [Dockerfile](#dockerfile) - [docker-compose.yml](#docker-composeyml) - [Usage](#usage) - [Using with leaderboard](#using-with-leaderboard) - - ## Necessary adjustments Important to note: The dataset generator uses @@ -81,11 +65,9 @@ index d1ae1df..ef1b503 100644 To run the dataset generator, first the Carla Simulator has to be running: - ```bash - b5 run carla-simulator - ``` +Start the docker container `leaderboard-2.0`. -You can then run the dataset generator by executing the following command in the `b5 shell`: +You can then run the dataset generator by executing the following command in an attached shell: ```bash python3 perception/src/dataset_generator.py --host carla-simulator --port 2000 --use-empty-world @@ -119,7 +101,7 @@ the # ... 
``` -Once the leaderboard evaluator is running, you can start the dataset generator in the `b5 shell`: +Once the leaderboard evaluator is running, you can start the dataset generator in an attached shell: ```bash python3 perception/src/dataset_generator.py --host carla-simulator --port 2000 diff --git a/doc/06_perception/02_dataset_structure.md b/doc/perception/dataset_structure.md similarity index 85% rename from doc/06_perception/02_dataset_structure.md rename to doc/perception/dataset_structure.md index 24a4d8e0..4d28e87f 100644 --- a/doc/06_perception/02_dataset_structure.md +++ b/doc/perception/dataset_structure.md @@ -2,31 +2,17 @@ **Summary:** This document gives a short overview about the structure of our dataset that is needed to train EfficientPS. ---- - -## Author - -Marco Riedenauer - -## Date - -19.02.2023 - - -* [Dataset structure](#dataset-structure) - * [Author](#author) - * [Date](#date) - * [Converting the dataset](#converting-the-dataset) - * [Preparation of the dataset for training](#preparation-of-the-dataset-for-training) - * [Explanation of the conversion of groundtruth images](#explanation-of-the-conversion-of-groundtruth-images) - * [Things](#things) - * [Stuff](#stuff) - * [Explanation of creating json files](#explanation-of-creating-json-files) - +- [Dataset structure](#dataset-structure) + - [Converting the dataset](#converting-the-dataset) + - [Preparation of the dataset for training](#preparation-of-the-dataset-for-training) + - [Explanation of the conversion of groundtruth images](#explanation-of-the-conversion-of-groundtruth-images) + - [Things](#things) + - [Stuff](#stuff) + - [Explanation of creating json files](#explanation-of-creating-json-files) ## Converting the dataset -After creating the dataset with the [Dataset Generator](01_dataset_generator.md) or creating a dataset on your own, +After creating the dataset with the [Dataset Generator](dataset_generator.md) or creating a dataset on your own, execute the [Dataset Converter](../../code/perception/src/dataset_converter.py) to ensure that your dataset has the following structure: @@ -64,7 +50,7 @@ following structure: When the dataset has the correct structure, the groundtruth images have to be converted to COCO format and some json files have to be created. -To do so, execute the following command in your b5 shell: +To do so, execute the following command in an attached shell: ```shell python3 perception/src/panoptic_segmentation/preparation/createPanopticImgs.py --dataset_folder diff --git a/doc/06_perception/10_distance_to_objects.md b/doc/perception/distance_to_objects.md similarity index 82% rename from doc/06_perception/10_distance_to_objects.md rename to doc/perception/distance_to_objects.md index f49fb860..07caf2cc 100644 --- a/doc/06_perception/10_distance_to_objects.md +++ b/doc/perception/distance_to_objects.md @@ -1,9 +1,14 @@ # Getting the Distance to Objects -Using the vision node and the lidar distance node we can calculate the distance of detected objects. -We can solve this problem from two directions mapping either pixel into the 3D-World or mapping 3D-LidarPoints into Pixel. +**Summary:** Using the vision node and the lidar distance node we can calculate the distance of detected objects. +We can solve this problem from two directions mapping either pixel into the 3D-World or mapping 3D-LidarPoints into Pixel. This file will explain the mapping of 3D-Points into 2D. -This file will will explain the mapping of 3D-Points into 2D. 
+- [Getting the Distance to Objects](#getting-the-distance-to-objects) + - [Converting 3D-Points into 2D-Camera-Space](#converting-3d-points-into-2d-camera-space) + - [Concept](#concept) + - [Purpose](#purpose) + - [Implementation](#implementation) + - [LIDAR-Configuration](#lidar-configuration) ## Converting 3D-Points into 2D-Camera-Space @@ -22,7 +27,7 @@ I found ways online, that seemed to solve this issue though. ### Concept -![3d_2d_porjection](../00_assets/3d_2d_projection.png) +![3d_2d_projection](../assets/3d_2d_projection.png) The goal is to calculate the projection of point P and find its Pixel-Coordinates (u,v) on the Image-Plane. To do this you need a couple of things: @@ -39,7 +44,7 @@ To do this you need a couple of things: The formula for this projection proposed by the literature looks like this: -![3d_2d_formula](../00_assets/3d_2d_formula.png) +![3d_2d_formula](../assets/3d_2d_formula.png) To get the camera-intrinsic matrix we need the width, height and fov of the image produced by the camera. Luckily we can easily get these values from the sensor configuration in (agent.py) @@ -65,7 +70,7 @@ To reconstruct the depth image, we simply implement the above formulas using num The resulting Image takes the distance in meters as values for its pixels. It therefore is a grayscale image. -![Grayscale Depth Image](../00_assets/2_15_layover.png) +![Grayscale Depth Image](../assets/2_15_layover.png) In the next step we want to get the distance for every bounding box the object-detection found. @@ -92,9 +97,9 @@ If there is no distance found in the depth image, we will return infinity for th This topic came to our attention, as we realised that the LIDAR was flickering, as you can see in the following image series. -![Grayscale Depth Image](../00_assets/2_layover.png) -![Grayscale Depth Image](../00_assets/3_layover.png) -![Grayscale Depth Image](../00_assets/4_layover.png) +![Grayscale Depth Image](../assets/2_layover.png) +![Grayscale Depth Image](../assets/3_layover.png) +![Grayscale Depth Image](../assets/4_layover.png) These are the Grayscale-Depth Images reconstructed within 600 milliseconds. diff --git a/doc/06_perception/04_efficientps.md b/doc/perception/efficientps.md similarity index 59% rename from doc/06_perception/04_efficientps.md rename to doc/perception/efficientps.md index 4fa17b74..9498b27c 100644 --- a/doc/06_perception/04_efficientps.md +++ b/doc/perception/efficientps.md @@ -4,44 +4,30 @@ **Summary:** This document gives a short overview about EfficientPS and its training process. ---- - -## Author - -Marco Riedenauer - -## Date - -28.03.2023 - - -* [EfficientPS](#efficientps) - * [Author](#author) - * [Date](#date) - * [Model Overview](#model-overview) - * [Training](#training) - * [Labels](#labels) - * [Training parameters](#training-parameters) - * [Train](#train) - +- [EfficientPS](#efficientps) + - [Model Overview](#model-overview) + - [Training](#training) + - [Labels](#labels) + - [Training parameters](#training-parameters) + - [Train](#train) ## Model Overview EfficientPS is a neural network designed for panoptic segmentation -(see [Panoptic Segmentation](../03_research/02_perception/03_first_implementation_plan.md#panoptic-segmentation)). +(see [Panoptic Segmentation](../research/perception/first_implementation_plan.md#panoptic-segmentation)). The model itself consists of 4 parts as can be seen in the following figure. The displayed shapes are incorrect in our case, since we used half the image size.
-![EfficientPS Structure](../00_assets/efficientps_structure.png) +![EfficientPS Structure](../assets/efficientps_structure.png) [Source](https://arxiv.org/pdf/2004.02307.pdf) -* Feature Extraction: +- Feature Extraction: This is the first part of the model, on which all following parts depend. In this part, all important features are extracted from the input image. -* Semantic Segmentation Head: As the name implies, this part of the model computes a semantic segmentation on the +- Semantic Segmentation Head: As the name implies, this part of the model computes a semantic segmentation on the extracted features. -* Instance Segmentation Head: This part computes the instance segmentation on things on the extracted features. -* Panoptic Fusion: As the last part of the model, this component is responsible for combining the information gathered +- Instance Segmentation Head: This part computes the instance segmentation on things on the extracted features. +- Panoptic Fusion: As the last part of the model, this component is responsible for combining the information gathered by the semantic segmentation and the instance segmentation heads. The output of this component and thereby the model is an image where stuff is semantic segmented and things are instance segmented. @@ -64,19 +50,19 @@ All adaptable training parameters can be found and changed in The most important configs are: -* MODEL/ROI_HEADS/NUM_CLASSES: Number of instance classes -* DATASET_PATH: Path to dataset root -* TRAIN_JSON: Relative path from DATASET_PATH to train json file -* VALID_JSON: Relative path from DATASET_PATH to validation json file -* PRED_DIR: Directory to save predictions in -* PRED_JSON: Name of prediction json file -* CHECKPOINT_PATH: Path of already trained models you want to train furthermore -* BATCH_SIZE: Number of images to be loaded during on training step -* NUM_CLASSES: Number of all classes +- MODEL/ROI_HEADS/NUM_CLASSES: Number of instance classes +- DATASET_PATH: Path to dataset root +- TRAIN_JSON: Relative path from DATASET_PATH to train json file +- VALID_JSON: Relative path from DATASET_PATH to validation json file +- PRED_DIR: Directory to save predictions in +- PRED_JSON: Name of prediction json file +- CHECKPOINT_PATH: Path of an already trained model that you want to continue training +- BATCH_SIZE: Number of images to be loaded during one training step +- NUM_CLASSES: Number of all classes ### Train -To start the training, just execute the following command in b5 shell: +To start the training, just execute the following command in an attached shell: ```shell python3 perception/src/panoptic_segmentation/train_net.py diff --git a/doc/06_perception/experiments/README.md b/doc/perception/experiments/README.md similarity index 100% rename from doc/06_perception/experiments/README.md rename to doc/perception/experiments/README.md diff --git a/doc/06_perception/experiments/lanenet_evaluation/README.md b/doc/perception/experiments/lanenet_evaluation/README.md similarity index 100% rename from doc/06_perception/experiments/lanenet_evaluation/README.md rename to doc/perception/experiments/lanenet_evaluation/README.md diff --git a/doc/06_perception/experiments/lanenet_evaluation/assets/1600_lanes.jpg b/doc/perception/experiments/lanenet_evaluation/assets/1600_lanes.jpg similarity index 100% rename from doc/06_perception/experiments/lanenet_evaluation/assets/1600_lanes.jpg rename to doc/perception/experiments/lanenet_evaluation/assets/1600_lanes.jpg diff --git 
a/doc/06_perception/experiments/lanenet_evaluation/assets/1600_lanes_mask.jpg b/doc/perception/experiments/lanenet_evaluation/assets/1600_lanes_mask.jpg similarity index 100% rename from doc/06_perception/experiments/lanenet_evaluation/assets/1600_lanes_mask.jpg rename to doc/perception/experiments/lanenet_evaluation/assets/1600_lanes_mask.jpg diff --git a/doc/06_perception/experiments/lanenet_evaluation/assets/1619_lanes.jpg b/doc/perception/experiments/lanenet_evaluation/assets/1619_lanes.jpg similarity index 100% rename from doc/06_perception/experiments/lanenet_evaluation/assets/1619_lanes.jpg rename to doc/perception/experiments/lanenet_evaluation/assets/1619_lanes.jpg diff --git a/doc/06_perception/experiments/lanenet_evaluation/assets/1619_lanes_mask.jpg b/doc/perception/experiments/lanenet_evaluation/assets/1619_lanes_mask.jpg similarity index 100% rename from doc/06_perception/experiments/lanenet_evaluation/assets/1619_lanes_mask.jpg rename to doc/perception/experiments/lanenet_evaluation/assets/1619_lanes_mask.jpg diff --git a/doc/06_perception/experiments/lanenet_evaluation/assets/1660_lanes.jpg b/doc/perception/experiments/lanenet_evaluation/assets/1660_lanes.jpg similarity index 100% rename from doc/06_perception/experiments/lanenet_evaluation/assets/1660_lanes.jpg rename to doc/perception/experiments/lanenet_evaluation/assets/1660_lanes.jpg diff --git a/doc/06_perception/experiments/lanenet_evaluation/assets/1660_lanes_mask.jpg b/doc/perception/experiments/lanenet_evaluation/assets/1660_lanes_mask.jpg similarity index 100% rename from doc/06_perception/experiments/lanenet_evaluation/assets/1660_lanes_mask.jpg rename to doc/perception/experiments/lanenet_evaluation/assets/1660_lanes_mask.jpg diff --git a/doc/06_perception/experiments/lanenet_evaluation/assets/1663_lanes.jpg b/doc/perception/experiments/lanenet_evaluation/assets/1663_lanes.jpg similarity index 100% rename from doc/06_perception/experiments/lanenet_evaluation/assets/1663_lanes.jpg rename to doc/perception/experiments/lanenet_evaluation/assets/1663_lanes.jpg diff --git a/doc/06_perception/experiments/lanenet_evaluation/assets/1663_lanes_mask.jpg b/doc/perception/experiments/lanenet_evaluation/assets/1663_lanes_mask.jpg similarity index 100% rename from doc/06_perception/experiments/lanenet_evaluation/assets/1663_lanes_mask.jpg rename to doc/perception/experiments/lanenet_evaluation/assets/1663_lanes_mask.jpg diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/README.md b/doc/perception/experiments/object-detection-model_evaluation/README.md similarity index 100% rename from doc/06_perception/experiments/object-detection-model_evaluation/README.md rename to doc/perception/experiments/object-detection-model_evaluation/README.md diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_PT_fasterrcnn_resnet50_fpn_v2.jpg b/doc/perception/experiments/object-detection-model_evaluation/asset-copies/1619_PT_fasterrcnn_resnet50_fpn_v2.jpg similarity index 100% rename from doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_PT_fasterrcnn_resnet50_fpn_v2.jpg rename to doc/perception/experiments/object-detection-model_evaluation/asset-copies/1619_PT_fasterrcnn_resnet50_fpn_v2.jpg diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_TF_faster-rcnn.jpg b/doc/perception/experiments/object-detection-model_evaluation/asset-copies/1619_TF_faster-rcnn.jpg similarity index 100% rename from 
doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_TF_faster-rcnn.jpg rename to doc/perception/experiments/object-detection-model_evaluation/asset-copies/1619_TF_faster-rcnn.jpg diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_nas_l.jpg b/doc/perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_nas_l.jpg similarity index 100% rename from doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_nas_l.jpg rename to doc/perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_nas_l.jpg diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_rtdetr_x.jpg b/doc/perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_rtdetr_x.jpg similarity index 100% rename from doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_rtdetr_x.jpg rename to doc/perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolo_rtdetr_x.jpg diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x.jpg b/doc/perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x.jpg similarity index 100% rename from doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x.jpg rename to doc/perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x.jpg diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x_seg.jpg b/doc/perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x_seg.jpg similarity index 100% rename from doc/06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x_seg.jpg rename to doc/perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x_seg.jpg diff --git a/doc/perception/experiments/object-detection-model_evaluation/globals.py b/doc/perception/experiments/object-detection-model_evaluation/globals.py new file mode 100644 index 00000000..325107dc --- /dev/null +++ b/doc/perception/experiments/object-detection-model_evaluation/globals.py @@ -0,0 +1,12 @@ +IMAGE_BASE_FOLDER = "/home/maxi/paf/code/output/12-dev/rgb/center" + +IMAGES_FOR_TEST = { + "start": "1600.png", + "intersection": "1619.png", + "traffic_light": "1626.png", + "traffic": "1660.png", + "bicycle_far": "1663.png", + "bicycle_close": "1668.png", + "construction_sign_far": "2658.png", + "construction_sign_close": "2769.png", +} diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/pt.py b/doc/perception/experiments/object-detection-model_evaluation/pt.py similarity index 59% rename from doc/06_perception/experiments/object-detection-model_evaluation/pt.py rename to doc/perception/experiments/object-detection-model_evaluation/pt.py index 145fbcfe..6a46aa97 100644 --- a/doc/06_perception/experiments/object-detection-model_evaluation/pt.py +++ b/doc/perception/experiments/object-detection-model_evaluation/pt.py @@ -1,16 +1,16 @@ -''' +""" Docs: https://pytorch.org/vision/stable/models.html#object-detection -''' +""" import os from time import perf_counter import torch import torchvision -from torchvision.models.detection.faster_rcnn import \ - FasterRCNN_MobileNet_V3_Large_320_FPN_Weights, \ - FasterRCNN_ResNet50_FPN_V2_Weights -from torchvision.models.detection.retinanet import \ - 
RetinaNet_ResNet50_FPN_V2_Weights +from torchvision.models.detection.faster_rcnn import ( + FasterRCNN_MobileNet_V3_Large_320_FPN_Weights, + FasterRCNN_ResNet50_FPN_V2_Weights, +) +from torchvision.models.detection.retinanet import RetinaNet_ResNet50_FPN_V2_Weights from globals import IMAGE_BASE_FOLDER, IMAGES_FOR_TEST from torchvision.utils import draw_bounding_boxes from pathlib import Path @@ -20,28 +20,27 @@ from torchvision.transforms.functional import to_pil_image ALL_MODELS = { - 'fasterrcnn_mobilenet_v3_large_320_fpn': - FasterRCNN_MobileNet_V3_Large_320_FPN_Weights, - 'fasterrcnn_resnet50_fpn_v2': FasterRCNN_ResNet50_FPN_V2_Weights, - 'retinanet_resnet50_fpn_v2': RetinaNet_ResNet50_FPN_V2_Weights, + "frcnn_mobilenet_v3_large_320_fpn": FasterRCNN_MobileNet_V3_Large_320_FPN_Weights, + "frcnn_resnet50_fpn_v2": FasterRCNN_ResNet50_FPN_V2_Weights, + "retinanet_resnet50_fpn_v2": RetinaNet_ResNet50_FPN_V2_Weights, } def load_model(model_name): - print('Selected model: ' + model_name) - print('Loading model...', end='') + print("Selected model: " + model_name) + print("Loading model...", end="") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") weights = ALL_MODELS[model_name].DEFAULT - model = torchvision.models.detection.__dict__[model_name]( - weights=weights - ).to(device) + model = torchvision.models.detection.__dict__[model_name](weights=weights).to( + device + ) model.eval() return model, weights, device def load_image(image_path, model_weights, device): img = Image.open(image_path) - img = img.convert('RGB') + img = img.convert("RGB") img = transforms.Compose([transforms.PILToTensor()])(img) img = model_weights.transforms()(img) img = img.unsqueeze_(0) @@ -60,11 +59,11 @@ def load_image(image_path, model_weights, device): image_np = load_image(image_path, weights, device) if first_gen: - print('Running warmup inference...') + print("Running warmup inference...") model(image_np) first_gen = False - print(f'Running inference for {p}... ') + print(f"Running inference for {p}... 
") start_time = perf_counter() @@ -79,21 +78,20 @@ def load_image(image_path, model_weights, device): label_id_offset = -1 - image_np_with_detections = torch.tensor(image_np * 255, - dtype=torch.uint8) - boxes = result['boxes'] - scores = result['scores'] - labels = [weights.meta["categories"][i] for i in result['labels']] + image_np_with_detections = torch.tensor(image_np * 255, dtype=torch.uint8) + boxes = result["boxes"] + scores = result["scores"] + labels = [weights.meta["categories"][i] for i in result["labels"]] - box = draw_bounding_boxes(image_np_with_detections[0], boxes, labels, - colors='red', width=2) + box = draw_bounding_boxes( + image_np_with_detections[0], boxes, labels, colors="red", width=2 + ) box_img = to_pil_image(box) file_name = Path(image_path).stem plt.figure(figsize=(32, 18)) - plt.title(f'PyTorch - {m} - {p} - {elapsed_time*1000:.0f}ms', - fontsize=30) + plt.title(f"PyTorch - {m} - {p} - {elapsed_time*1000:.0f}ms", fontsize=30) plt.imshow(box_img) - plt.savefig(f'{IMAGE_BASE_FOLDER}/result/{file_name}_PT_{m}.jpg') + plt.savefig(f"{IMAGE_BASE_FOLDER}/result/{file_name}_PT_{m}.jpg") plt.close() diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/pylot.py b/doc/perception/experiments/object-detection-model_evaluation/pylot.py similarity index 59% rename from doc/06_perception/experiments/object-detection-model_evaluation/pylot.py rename to doc/perception/experiments/object-detection-model_evaluation/pylot.py index d59e5e75..19e2a3b1 100644 --- a/doc/06_perception/experiments/object-detection-model_evaluation/pylot.py +++ b/doc/perception/experiments/object-detection-model_evaluation/pylot.py @@ -1,7 +1,7 @@ -''' +""" Docs: https://www.tensorflow.org/hub/tutorials/tf2_object_detection, https://pylot.readthedocs.io/en/latest/perception.detection.html -''' +""" from globals import IMAGE_BASE_FOLDER, IMAGES_FOR_TEST @@ -20,55 +20,57 @@ from object_detection.utils import visualization_utils as viz_utils -matplotlib.use('TkAgg') +matplotlib.use("TkAgg") -tf.get_logger().setLevel('ERROR') +tf.get_logger().setLevel("ERROR") ALL_MODELS = [ - 'faster-rcnn', - 'ssdlite-mobilenet-v2', - 'ssd-mobilenet-fpn-640', - 'ssd-mobilenet-v1', - 'ssd-mobilenet-v1-fpn' + "faster-rcnn", + "ssdlite-mobilenet-v2", + "ssd-mobilenet-fpn-640", + "ssd-mobilenet-v1", + "ssd-mobilenet-v1-fpn", ] -MODEL_BASE_FOLDER = '/home/maxi/Downloads/models/obstacle_detection' +MODEL_BASE_FOLDER = "/home/maxi/Downloads/models/obstacle_detection" -LABEL_FILE = '/home/maxi/Downloads/pylot.names' +LABEL_FILE = "/home/maxi/Downloads/pylot.names" def load_image_into_numpy_array(path): - image_data = tf.io.gfile.GFile(path, 'rb').read() + image_data = tf.io.gfile.GFile(path, "rb").read() image = Image.open(BytesIO(image_data)) (im_width, im_height) = image.size - return np.array(image.convert('RGB').getdata()).reshape( - (1, im_height, im_width, 3)).astype(np.uint8) + return ( + np.array(image.convert("RGB").getdata()) + .reshape((1, im_height, im_width, 3)) + .astype(np.uint8) + ) def load_model(model_name): model_handle = os.path.join(MODEL_BASE_FOLDER, model_name) - print('Selected model: ' + model_name) + print("Selected model: " + model_name) - print('Loading model...', end='') + print("Loading model...", end="") hub_model = hub.load(model_handle) - print(' done!') + print(" done!") return hub_model def get_category_index(label_file): - with open(label_file, 'r') as f: + with open(label_file, "r") as f: labels = f.readlines() labels = [label.strip() for label in labels] - category_index 
= \ - {i: {'id': i, 'name': name} for i, name in enumerate(labels)} + category_index = {i: {"id": i, "name": name} for i, name in enumerate(labels)} return category_index -if not os.path.exists(f'{IMAGE_BASE_FOLDER}/result'): - os.makedirs(f'{IMAGE_BASE_FOLDER}/result') +if not os.path.exists(f"{IMAGE_BASE_FOLDER}/result"): + os.makedirs(f"{IMAGE_BASE_FOLDER}/result") category_index = get_category_index(LABEL_FILE) @@ -82,16 +84,16 @@ def get_category_index(label_file): image_tensor = tf.convert_to_tensor(image_np) if first_gen: - print('Running warmup inference...') - model.signatures['serving_default'](image_tensor) + print("Running warmup inference...") + model.signatures["serving_default"](image_tensor) first_gen = False - print(f'Running inference for {p}... ') + print(f"Running inference for {p}... ") start_time = perf_counter() # running inference - results = model.signatures['serving_default'](image_tensor) + results = model.signatures["serving_default"](image_tensor) elapsed_time = perf_counter() - start_time @@ -104,20 +106,20 @@ def get_category_index(label_file): viz_utils.visualize_boxes_and_labels_on_image_array( image_np_with_detections[0], - result['boxes'][0], - (result['classes'][0] + label_id_offset).astype(int), - result['scores'][0], + result["boxes"][0], + (result["classes"][0] + label_id_offset).astype(int), + result["scores"][0], category_index, use_normalized_coordinates=True, max_boxes_to_draw=200, - min_score_thresh=.10, - agnostic_mode=False) + min_score_thresh=0.10, + agnostic_mode=False, + ) file_name = Path(image_path).stem plt.figure(figsize=(32, 18)) - plt.title(f'Pylot (TF) - {m} - {p} - {elapsed_time*1000:.0f}ms', - fontsize=30) + plt.title(f"Pylot (TF) - {m} - {p} - {elapsed_time*1000:.0f}ms", fontsize=30) plt.imshow(image_np_with_detections[0]) - plt.savefig(f'{IMAGE_BASE_FOLDER}/result/{file_name}_TF_{m}.jpg') + plt.savefig(f"{IMAGE_BASE_FOLDER}/result/{file_name}_TF_{m}.jpg") plt.close() diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/requirements.txt b/doc/perception/experiments/object-detection-model_evaluation/requirements.txt similarity index 100% rename from doc/06_perception/experiments/object-detection-model_evaluation/requirements.txt rename to doc/perception/experiments/object-detection-model_evaluation/requirements.txt diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/yolo.py b/doc/perception/experiments/object-detection-model_evaluation/yolo.py similarity index 53% rename from doc/06_perception/experiments/object-detection-model_evaluation/yolo.py rename to doc/perception/experiments/object-detection-model_evaluation/yolo.py index 39d727b7..b4949622 100644 --- a/doc/06_perception/experiments/object-detection-model_evaluation/yolo.py +++ b/doc/perception/experiments/object-detection-model_evaluation/yolo.py @@ -1,9 +1,9 @@ -''' +""" Docs: https://docs.ultralytics.com/modes/predict/ https://docs.ultralytics.com/tasks/detect/#models https://docs.ultralytics.com/models/yolo-nas -''' +""" import os from globals import IMAGE_BASE_FOLDER, IMAGES_FOR_TEST @@ -12,33 +12,34 @@ import torch ALL_MODELS = { - 'yolov8n': YOLO, - 'yolov8s': YOLO, - 'yolov8m': YOLO, - 'yolov8l': YOLO, - 'yolov8x': YOLO, - 'yolo_nas_l': NAS, - 'yolo_nas_m': NAS, - 'yolo_nas_s': NAS, - 'rtdetr-l': RTDETR, - 'rtdetr-x': RTDETR, - 'yolov8x-seg': YOLO, - 'sam-l': SAM, - 'FastSAM-x': FastSAM, + "yolov8n": YOLO, + "yolov8s": YOLO, + "yolov8m": YOLO, + "yolov8l": YOLO, + "yolov8x": YOLO, + "yolo_nas_l": NAS, + "yolo_nas_m": 
NAS, + "yolo_nas_s": NAS, + "rtdetr-l": RTDETR, + "rtdetr-x": RTDETR, + "yolov8x-seg": YOLO, + "sam-l": SAM, + "FastSAM-x": FastSAM, } with torch.inference_mode(): for m, wrapper in ALL_MODELS.items(): - print('Selected model: ' + m) - model_path = os.path.join('yolo', m + '.pt') + print("Selected model: " + m) + model_path = os.path.join("yolo", m + ".pt") model = wrapper(model_path) for p in IMAGES_FOR_TEST: image_path = os.path.join(IMAGE_BASE_FOLDER, IMAGES_FOR_TEST[p]) img = Image.open(image_path) - _ = model.predict(source=img, save=True, save_conf=True, - line_width=1, half=True) + _ = model.predict( + source=img, save=True, save_conf=True, line_width=1, half=True + ) del model diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/README.md b/doc/perception/experiments/traffic-light-detection_evaluation/README.md similarity index 100% rename from doc/06_perception/experiments/traffic-light-detection_evaluation/README.md rename to doc/perception/experiments/traffic-light-detection_evaluation/README.md diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_1.png b/doc/perception/experiments/traffic-light-detection_evaluation/assets/back_1.png similarity index 100% rename from doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_1.png rename to doc/perception/experiments/traffic-light-detection_evaluation/assets/back_1.png diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_14.jpg b/doc/perception/experiments/traffic-light-detection_evaluation/assets/back_14.jpg similarity index 100% rename from doc/06_perception/experiments/traffic-light-detection_evaluation/assets/back_14.jpg rename to doc/perception/experiments/traffic-light-detection_evaluation/assets/back_14.jpg diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_22.jpg b/doc/perception/experiments/traffic-light-detection_evaluation/assets/green_22.jpg similarity index 100% rename from doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_22.jpg rename to doc/perception/experiments/traffic-light-detection_evaluation/assets/green_22.jpg diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_4.png b/doc/perception/experiments/traffic-light-detection_evaluation/assets/green_4.png similarity index 100% rename from doc/06_perception/experiments/traffic-light-detection_evaluation/assets/green_4.png rename to doc/perception/experiments/traffic-light-detection_evaluation/assets/green_4.png diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_10.png b/doc/perception/experiments/traffic-light-detection_evaluation/assets/red_10.png similarity index 100% rename from doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_10.png rename to doc/perception/experiments/traffic-light-detection_evaluation/assets/red_10.png diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_20.png b/doc/perception/experiments/traffic-light-detection_evaluation/assets/red_20.png similarity index 100% rename from doc/06_perception/experiments/traffic-light-detection_evaluation/assets/red_20.png rename to doc/perception/experiments/traffic-light-detection_evaluation/assets/red_20.png diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_1.png b/doc/perception/experiments/traffic-light-detection_evaluation/assets/yellow_1.png 
similarity index 100% rename from doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_1.png rename to doc/perception/experiments/traffic-light-detection_evaluation/assets/yellow_1.png diff --git a/doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_18.jpg b/doc/perception/experiments/traffic-light-detection_evaluation/assets/yellow_18.jpg similarity index 100% rename from doc/06_perception/experiments/traffic-light-detection_evaluation/assets/yellow_18.jpg rename to doc/perception/experiments/traffic-light-detection_evaluation/assets/yellow_18.jpg diff --git a/doc/06_perception/08_kalman_filter.md b/doc/perception/kalman_filter.md similarity index 95% rename from doc/06_perception/08_kalman_filter.md rename to doc/perception/kalman_filter.md index d79833f6..e7c676e1 100644 --- a/doc/06_perception/08_kalman_filter.md +++ b/doc/perception/kalman_filter.md @@ -8,24 +8,7 @@ As of now it is working with a 2D x-y-Transition model, which is why the current This implements the STANDARD Kalman Filter and NOT the Extended Kalman Filter or any other non-linear variant of the Kalman Filter. ---- - -## Author - -Robert Fischer - -## Date - -29.03.2024 - -## Prerequisite - ---- - - [Kalman Filter](#kalman-filter) - - [Author](#author) - - [Date](#date) - - [Prerequisite](#prerequisite) - [Getting started](#getting-started) - [Description](#description) - [1. Predict](#1-predict) @@ -35,9 +18,6 @@ Robert Fischer - [Inputs](#inputs) - [Outputs](#outputs) - [Performance](#performance) - - ---- ## Getting started @@ -48,7 +28,7 @@ to **"Kalman"**, depending on if you want to use the Filter for both the Positio In the case of using the Filter for both, it should look like this: -![Kalman Filter for both parameters](../../doc/00_assets/perception/kalman_installation_guide.png) +![Kalman Filter for both parameters](../../doc/assets/perception/kalman_installation_guide.png) No further installation needed. @@ -250,9 +230,9 @@ Smaller boxes mean the data is closer together and less spread. The Kalman Filter was tuned to create the smallest MSE possible, which gives more weight to larger errors which we want to minimise. The MAE on the other hand shows a 1:1 representation in terms of distance from the ideal to the predicted location. -![MSE Boxed Graph of Location Error with respect to ideal Location](../../doc/00_assets/perception/data_26_MSE_Boxed.png) +![MSE Boxed Graph of Location Error with respect to ideal Location](../../doc/assets/perception/data_26_MSE_Boxed.png) -![MAE Boxed Graph of Location Error with respect to ideal Location](../../doc/00_assets/perception/data_26_MAE_Boxed.png) +![MAE Boxed Graph of Location Error with respect to ideal Location](../../doc/assets/perception/data_26_MAE_Boxed.png) As you see this data you might think the unfiltered data seems to be just as good if not even better than the previous rolling average filter (RAF). diff --git a/doc/06_perception/03_lidar_distance_utility.md b/doc/perception/lidar_distance_utility.md similarity index 90% rename from doc/06_perception/03_lidar_distance_utility.md rename to doc/perception/lidar_distance_utility.md index 2d68e6f1..4a93449d 100644 --- a/doc/06_perception/03_lidar_distance_utility.md +++ b/doc/perception/lidar_distance_utility.md @@ -12,24 +12,9 @@ Additionally, it publishes a [Range](http://docs.ros.org/en/melodic/api/sensor_m containing the closest and the farest point. This can then be used to detect the distance to the closest object in front of us. 
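The z-filtering described for the lidar distance utility above can be pictured in a few lines of numpy. The following is only a sketch, assuming the cloud is already available as an (N, 3) array; it reuses the `min_z = -1.5` example from the configuration section and is not the utility's actual code:

```python
import numpy as np


def closest_and_farthest(points, min_z=-1.5):
    """Distance to the closest and farthest point that survives the z-filter.

    points: (N, 3) array of x, y, z lidar coordinates; min_z = -1.5 keeps
    only returns starting roughly 20 cm above the ground, as in the doc.
    """
    kept = points[points[:, 2] >= min_z]
    if kept.size == 0:
        return float("inf"), float("inf")
    dists = np.linalg.norm(kept, axis=1)
    return float(dists.min()), float(dists.max())


# Toy cloud: one ground return (filtered out) and two obstacles.
cloud = np.array([[5.0, 0.0, -1.7], [8.0, 1.0, 0.2], [20.0, -2.0, 0.5]])
print(closest_and_farthest(cloud))  # (~8.06, ~20.11)
```

Presumably these two values are what ends up in the published `Range` message.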
---- - -## Author - -Tim Dreier - -## Date - -16.03.2023 - ---- - -* [Lidar Distance Utility](#lidar-distance-utility) - * [Author](#author) - * [Date](#date) - * [Configuration](#configuration) - * [Example](#example) - +- [Lidar Distance Utility](#lidar-distance-utility) + - [Configuration](#configuration) + - [Example](#example) ## Configuration @@ -54,7 +39,7 @@ starting from 20cm above the ground you have to set min_z = -1.5. The meaning of the x and y values is described by the following image: -![lidar filter](../00_assets/lidar_filter.png) +![lidar filter](../assets/lidar_filter.png) ### Example diff --git a/doc/06_perception/07_position_heading_filter_debug_node.md b/doc/perception/position_heading_filter_debug_node.md similarity index 89% rename from doc/06_perception/07_position_heading_filter_debug_node.md rename to doc/perception/position_heading_filter_debug_node.md index fd1da95c..14e56443 100644 --- a/doc/06_perception/07_position_heading_filter_debug_node.md +++ b/doc/perception/position_heading_filter_debug_node.md @@ -3,7 +3,7 @@ **Summary:** [position_heading_filter_debug_node.py](../../code/perception/src/position_heading_filter_debug_node.py): The position_heading_filter_debug_node node is responsible for collecting sensor data from the IMU and GNSS and process the data in such a way, that it shows the errors between the real is-state and the measured state. -The data can be looked at in rqt_plots or (better) in mathplotlib plots pre-made by the [viz.py](../../code/perception/src/00_Experiments/Position_Heading_Datasets/viz.py) file. +The data can be looked at in rqt_plots or (better) in mathplotlib plots pre-made by the [viz.py](../../code/perception/src/experiments/Position_Heading_Datasets/viz.py) file. !!THIS NODE USES THE CARLA API!! @@ -11,32 +11,12 @@ Using the Carla API could disqualify us from the leaderboard when submitting ont Uncomment (maybe even remove) this file when submitting to the official leaderboard. This file is only for debugging! ---- - -## Author - -Robert Fischer - -## Date - -31.03.2024 - -## Prerequisite - ---- - - [position\_heading\_filter\_debug\_node.py](#position_heading_filter_debug_nodepy) - - [Author](#author) - - [Date](#date) - - [Prerequisite](#prerequisite) - [Getting started](#getting-started) - [Description](#description) - [Inputs](#inputs) - [Outputs](#outputs) - [Visualization](#visualization) - - ---- ## Getting started @@ -48,12 +28,12 @@ If you are trying to implement a new position/ heading filter and want to tune i 1. Create a new Filter Node class (if not already done) AND publish a paf/hero/filter_name_pos AND/OR filter_name_heading 2. Change the topic of the test_filter_subscribers to your topic (currently kalman) -![Subscriber Change](/doc/00_assets/perception/sensor_debug_change.png) +![Subscriber Change](/doc/assets/perception/sensor_debug_change.png) If you want to save the debug in csv files for better debugging you should uncomment that part in the main loop of the file: -![Save Files as CSV](/doc/00_assets/perception/sensor_debug_data_saving.png) +![Save Files as CSV](/doc/assets/perception/sensor_debug_data_saving.png) --- @@ -61,18 +41,18 @@ that part in the main loop of the file: Running the node provides you with ideal position and heading topics that can be used to debug your sensor filters by giving you ideal values you should aim for. 
-It also provides you with helpful data saving methods for plotting your data (with regards to ideal values) by using the [viz.py](../../code/perception/src/00_Experiments/Position_Heading_Datasets/viz.py) file, which is a lot more customizable and nicer to use than rqt plots. +It also provides you with helpful data saving methods for plotting your data (with regard to ideal values) by using the [viz.py](../../code/perception/src/experiments/Position_Heading_Datasets/viz.py) file, which is a lot more customizable and nicer to use than rqt plots. If you want to know more about how to use that, you can go on to [Visualization](#visualization). An Example of rqt plot Output can be seen here: -![Distance from current_pos to ideal_gps_pos (blue) and to carla_pos (red)](../00_assets/gnss_ohne_rolling_average.png) +![Distance from current_pos to ideal_gps_pos (blue) and to carla_pos (red)](../assets/gnss_ohne_rolling_average.png) The file uses a main loop with a fixed refresh rate, which can be changed in the perception launch file. In this loop it does the following things: 1. Refresh the Ideal Position and Heading (using the Carla API) 2. Update & Publish the Position & Heading Debug Values (see [Outputs](#outputs) for more info) -3. Save the debug data in CSV files in the corresponding folder in code/perception/00_Experiments +3. Save the debug data in CSV files in the corresponding folder in code/perception/experiments (can be commented out if working with rqt graphs is enough for you) @@ -180,11 +160,11 @@ It can be used to debug X data, Y data and Heading (h) data. To be able to save data in csv files, you just need to uncomment the saving methods in the main loop as stated in the [Getting Started](#getting-started) chapter. -To use the [viz.py](../../code/perception/src/00_Experiments/Position_Heading_Datasets/viz.py) file you will have to: +To use the [viz.py](../../code/perception/src/experiments/Position_Heading_Datasets/viz.py) file you will have to: -1. Configure the main method to your likings inside the viz.py: ![picture](/doc/00_assets/perception/sensor_debug_viz_config.png) -2. Open up the b5 shell typing ```b5 shell``` into the terminal -3. Navigate to the code/perception/src/00_Experiments/Position_Heading folder using ```cd``` +1. Configure the main method to your liking inside viz.py: ![picture](/doc/assets/perception/sensor_debug_viz_config.png) +2. Open up an attached shell +3. Navigate to the code/perception/src/experiments/Position_Heading folder using ```cd``` 4. Run viz.py using ```python viz.py``` With this file you can plot: diff --git a/doc/06_perception/09_position_heading_publisher_node.md b/doc/perception/position_heading_publisher_node.md similarity index 87% rename from doc/06_perception/09_position_heading_publisher_node.md rename to doc/perception/position_heading_publisher_node.md index 30064a4f..d833eef4 100644 --- a/doc/06_perception/09_position_heading_publisher_node.md +++ b/doc/perception/position_heading_publisher_node.md @@ -2,22 +2,7 @@ **Summary:** This node publishes the `current_pos` (Location of the car) and `current_heading` (Orientation of the car around the Z-axis) for every Node that needs to work with that. It also publishes all unfiltered Position and Heading signals for the Filter nodes to work with (such as Kalman).
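Since the heading published here comes from the quaternion-to-yaw conversion documented in the coordinate transformation diff above, a compact sketch of that math may help. It follows the derivation shown there (rotate the unit x-axis by the quaternion, then take the angle of the result in the x-y plane) and assumes the `[q.x, q.y, q.z, q.w]` list order from the docstring; it is not necessarily the exact project code:

```python
import math


def quat_to_heading(quaternion):
    # Quaternion given as [q.x, q.y, q.z, q.w], matching the docstring above.
    x, y, z, w = quaternion
    # First column of the rotation matrix, i.e. the unit x-axis rotated
    # by the quaternion, projected onto the x-y plane.
    vx = 1.0 - 2.0 * (y * y + z * z)
    vy = 2.0 * (x * y + z * w)
    # Angle between the rotated vector and the x-axis = yaw around z.
    return math.atan2(vy, vx)


print(quat_to_heading([0.0, 0.0, 0.0, 1.0]))  # identity rotation -> 0.0 rad
```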
---- - -## Author - -Robert Fischer - -## Date - -14.01.2024 - -## Prerequisite - - [position\_heading\_publisher\_node](#position_heading_publisher_node) - - [Author](#author) - - [Date](#date) - - [Prerequisite](#prerequisite) - [Usage](#usage) - [Modular Extension / Template](#modular-extension--template) - [Heading Functions](#heading-functions) @@ -53,7 +38,7 @@ You can use filters for the heading and for the location independently using the In case of using the Kalman Filter for both, it should look like this: -![Kalman Filter for both parameters](../../doc/00_assets/perception/kalman_installation_guide.png) +![Kalman Filter for both parameters](../../doc/assets/perception/kalman_installation_guide.png) _If you want to create a new Filter in the future, I suggest keeping this template intact. See next Chapter_ 😊 @@ -68,19 +53,19 @@ For example: _Implementing a new non-linear Kalman Filter could look like this_: - _perception.launch file_: -![Non Linear Kalman Filter Example](../../doc/00_assets/perception/non_linear_kalman_example.png) +![Non Linear Kalman Filter Example](../../doc/assets/perception/non_linear_kalman_example.png) - _Subscribers_: -![Non Linear Kalman Filter Example 2](../../doc/00_assets/perception/modular_subscriber_example.png) +![Non Linear Kalman Filter Example 2](../../doc/assets/perception/modular_subscriber_example.png) - _Heading Methods_: -![Non Linear Kalman Filter Example](../../doc/00_assets/perception/adding_new_position_methods.png) +![Non Linear Kalman Filter Example](../../doc/assets/perception/adding_new_position_methods.png) - _Position Methods_: -![Non Linear Kalman Filter Example](../../doc/00_assets/perception/new_heading_pub_example.png) +![Non Linear Kalman Filter Example](../../doc/assets/perception/new_heading_pub_example.png) As you can see, this file is merely for gathering and forwarding the filter values in the form of currentPos and currentHeading. @@ -100,7 +85,7 @@ If `none` is selected for the Filter, it publishes the data as the `current_pos` This method is called when new heading data is received. It handles all necessary updates and publishes the heading as a double value, indicating the cars rotation around the z-axis in rad. -For more info about how the heading is calculated see [here](./00_coordinate_transformation.md). +For more info about how the heading is calculated see [here](./coordinate_transformation.md). ### Position Functions diff --git a/doc/06_perception/11_traffic_light_detection.md b/doc/perception/traffic_light_detection.md similarity index 88% rename from doc/06_perception/11_traffic_light_detection.md rename to doc/perception/traffic_light_detection.md index 74ed555c..81340251 100644 --- a/doc/06_perception/11_traffic_light_detection.md +++ b/doc/perception/traffic_light_detection.md @@ -1,5 +1,18 @@ # Traffic Light Detection +**Summary:** This page explains how traffic lights are detected and interpreted. + +- [Traffic Light Detection](#traffic-light-detection) + - [Vision Node](#vision-node) + - [TrafficLightNode](#trafficlightnode) + - [Attributes](#attributes) + - [Methods](#methods) + - [Functions](#functions) + - [Usage](#usage) + - [Filtering of images](#filtering-of-images) + - [1. Vision Node](#1-vision-node) + - [2. Traffic Light Node](#2-traffic-light-node) + ## Vision Node For each analyzed image, it is checked whether an object with the ID=9 (traffic light) is detected. 
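To make the ID=9 check above concrete, here is a small sketch of how detections could be filtered down to traffic-light crops before classification. The argument names are illustrative assumptions; the real node works on its model's result object:

```python
import numpy as np

TRAFFIC_LIGHT_CLASS_ID = 9  # class id checked by the vision node, per the text above


def crop_traffic_lights(image, boxes, class_ids):
    """Cut out every detection that was classified as a traffic light.

    image: HxWx3 array; boxes: iterable of (x1, y1, x2, y2) pixel
    coordinates; class_ids: predicted class per box.
    """
    crops = []
    for (x1, y1, x2, y2), cls in zip(boxes, class_ids):
        if int(cls) == TRAFFIC_LIGHT_CLASS_ID:
            crops.append(image[int(y1):int(y2), int(x1):int(x2)])
    return crops


# Toy example: one traffic light box and one car box -> a single crop.
img = np.zeros((600, 800, 3), dtype=np.uint8)
print(len(crop_traffic_lights(img, [(10, 20, 40, 80), (100, 100, 300, 200)], [9, 2])))
```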
diff --git a/doc/06_perception/06_vision_node.md b/doc/perception/vision_node.md similarity index 68% rename from doc/06_perception/06_vision_node.md rename to doc/perception/vision_node.md index 951350ac..c73d7688 100644 --- a/doc/06_perception/06_vision_node.md +++ b/doc/perception/vision_node.md @@ -1,8 +1,15 @@ # Vision Node -The Visison Node provides an adaptive interface that is able to perform object-detection and/or image-segmentation on multiple cameras at the same time. +**Summary:** The Vision Node provides an adaptive interface that is able to perform object-detection and/or image-segmentation on multiple cameras at the same time. It can also subscribe to the lidar_distance publisher and calculate distances of objects inside the detected bounding boxes. +- [Vision Node](#vision-node) + - [Model overview](#model-overview) + - [How it works](#how-it-works) + - [1. Object-Detection](#1-object-detection) + - [2. Distance-Calculation](#2-distance-calculation) + - [3. Publishing of Outputs](#3-publishing-of-outputs) + ## Model overview The Vision-Node implements an interface for a lot of different models which can be specified in the perception launch file. @@ -76,43 +83,43 @@ The object-detection can be run both ultralytics and pyTorch models. Depending o The object-detection can publish images to RViz under their specified camera angle and topic. -![Object-Detection](../06_perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x_seg.jpg) +![Object-Detection](../perception/experiments/object-detection-model_evaluation/asset-copies/1619_yolov8x_seg.jpg) -Please refer to the [model evaluation](../06_perception/experiments/object-detection-model_evaluation/README.md) for more detailed information about the performance of each model. +Please refer to the [model evaluation](../perception/experiments/object-detection-model_evaluation/README.md) for more detailed information about the performance of each model. **Center Camera** -![Center Camera](../00_assets/Front_Detection.png) +![Center Camera](../assets/Front_Detection.png) **Back Camera** -![Back Camera](../00_assets/Back_Detection.png) +![Back Camera](../assets/Back_Detection.png) **Left Camera** -![Left Camera](../00_assets/Left_Detection.png) +![Left Camera](../assets/Left_Detection.png) **Right Camera** -![Right Camera](../00_assets/Right_Detection.png) +![Right Camera](../assets/Right_Detection.png) ## 2. Distance-Calculation -The Vision-Node reveives depth-images from the [lidar distance node](10_distance_to_objects.md) for the specified camera angle. It can than find the min x and min abs y distance within each bounding box that has been predicted by a model. This feature is implemented only for utralytics models. +The Vision-Node receives depth-images from the [lidar distance node](distance_to_objects.md) for the specified camera angle. It can then find the min x and min abs y distance within each bounding box that has been predicted by a model. This feature is implemented only for ultralytics models. The depth images have the same dimension as the camera image and contain x, y and z coordinates of the lidar coordinates system in the three RGB-Channels. -![Depth Image](../00_assets/2_15_layover.png) +![Depth Image](../assets/2_15_layover.png) -Read more about the calculation of Depth Image [here](10_distance_to_objects.md) +Read more about the calculation of the Depth Image [here](distance_to_objects.md) ## 3.

-In order to provide valuble information for the [planning](../07_planning/README.md), the Vision-Node collects a set of information for each object and publishes a list of objects on the "distance_of_objects" Topic.
+In order to provide valuable information for the [planning](../planning/README.md), the Vision-Node collects a set of information for each object and publishes a list of objects on the "distance_of_objects" Topic.

- Class_Index
- Min_X
- Min_Abs_Y

When no Lidar-Points are found inside a bounding box, the distances will both be set to np.inf.

-Check also [here](10_distance_to_objects.md) to learn more about this list.
+Check also [here](distance_to_objects.md) to learn more about this list.

In order to provide good visual feedback of what is calculated in the Vision-Node, each camera angle publishes images with bounding boxes and the corresponding distance values found for the object.

-![Distance of objects](../00_assets/distance_visualization.png)
+![Distance of objects](../assets/distance_visualization.png)

diff --git a/doc/07_planning/ACC.md b/doc/planning/ACC.md
similarity index 68%
rename from doc/07_planning/ACC.md
rename to doc/planning/ACC.md
index 012e45c6..95621f09 100644
--- a/doc/07_planning/ACC.md
+++ b/doc/planning/ACC.md
@@ -1,12 +1,12 @@
# ACC (Adaptive Cruise Control)

-## About
+**Summary:** The ACC module is a ROS node responsible for adaptive speed control in an autonomous vehicle. It receives information about possible collisions, the current speed, the trajectory, and the speed limits. Based on this information, it calculates the desired speed and publishes it.

-The ACC module is a ROS node responsible for adaptive speed control in an autonomous vehicle. It receives information about possible collisions, the current speed, the trajectory, and the speed limits. Based on this information, it calculates the desired speed and publishes it.
-
-## Components
-
-This module doesn't contain more components.
+- [ACC (Adaptive Cruise Control)](#acc-adaptive-cruise-control)
+  - [ROS Data Interface](#ros-data-interface)
+    - [Published Topics](#published-topics)
+    - [Subscribed Topics](#subscribed-topics)
+  - [Node Creation + Running Tests](#node-creation--running-tests)

## ROS Data Interface

diff --git a/doc/07_planning/Behavior_tree.md b/doc/planning/Behavior_tree.md
similarity index 93%
rename from doc/07_planning/Behavior_tree.md
rename to doc/planning/Behavior_tree.md
index d3db2ca6..aae559df 100644
--- a/doc/07_planning/Behavior_tree.md
+++ b/doc/planning/Behavior_tree.md
@@ -4,24 +4,7 @@

**Disclaimer**: As we mainly built our decision tree on the previous projects [psaf2](https://github.com/ll7/psaf2) and [paf22](https://github.com/ll7/paf22), most of the documentation was added here and adjusted to the changes we made.

----
-
-## Author
-
-Julius Miller
-
-## Date
-
-01.04.2024
-
-## Prerequisite
-
----
-
- [Behavior Tree](#behavior-tree)
-  - [Author](#author)
-  - [Date](#date)
-  - [Prerequisite](#prerequisite)
  - [About](#about)
  - [Our behaviour tree](#our-behaviour-tree)
    - [Behavior](#behavior)
@@ -44,11 +27,10 @@
    - [`initialise()`](#initialise)
    - [`update()`](#update)
    - [`terminate()`](#terminate)
-
## About

-This Package implements a behaviour agent for our autonomous car using **Behaviour Trees**. It uses the [py_trees](./01_py_trees.md) Framework, that works well with ROS.
+This package implements a behaviour agent for our autonomous car using **Behaviour Trees**. It uses the [py_trees](./py_trees.md) framework, which works well with ROS.

For visualization at runtime you might want to also install this [rqt-Plugin](https://wiki.ros.org/rqt_py_trees).

## Our behaviour tree

@@ -56,7 +38,7 @@
The following section describes the behaviour tree we use for normal driving using all functionality provided by the agent. In the actual implementation this is part of a bigger tree that handles things like writing topics to the blackboard, starting and finishing the decision tree. The following tree is a simplification.

-![Simple Tree](../00_assets/planning/simple_final_tree.png)
+![Simple Tree](../assets/planning/simple_final_tree.png)

### Behavior

Represents a specific task/scenario which is handled by the decision tree.

#### Legend

-![BT Legend](../00_assets/legend_bt.png)
+![BT Legend](../assets/legend_bt.png)

-![BT Intersection](../00_assets/intersection.png)
+![BT Intersection](../assets/intersection.png)

If there is an intersection coming up, the agent executes the following sequence of behaviours:

diff --git a/doc/07_planning/Collision_Check.md b/doc/planning/Collision_Check.md
similarity index 68%
rename from doc/07_planning/Collision_Check.md
rename to doc/planning/Collision_Check.md
index 67382dc5..d3e88572 100644
--- a/doc/07_planning/Collision_Check.md
+++ b/doc/planning/Collision_Check.md
@@ -1,9 +1,14 @@
# Collision Check

-## Overview
-
-This module is responsible for detecting collisions and reporting them. It subscribes to topics that provide information about the current speed of the vehicle and the distances to objects detected by a LIDAR sensor.
-It publishes topics that provide information about emergency stops, the distance to collisions, the distance to oncoming traffic, and the approximated speed of the obstacle in front
+**Summary:** This module is responsible for detecting collisions and reporting them. It subscribes to topics that provide information about the current speed of the vehicle and the distances to objects detected by a LIDAR sensor.
+It publishes topics that provide information about emergency stops, the distance to collisions, the distance to oncoming traffic, and the approximated speed of the obstacle in front.
+
+- [Collision Check](#collision-check)
+  - [Component](#component)
+  - [ROS Data Interface](#ros-data-interface)
+    - [Published Topics](#published-topics)
+    - [Subscribed Topics](#subscribed-topics)
+  - [Node Creation + Running Tests](#node-creation--running-tests)

## Component

diff --git a/doc/07_planning/Global_Planner.md b/doc/planning/Global_Planner.md
similarity index 97%
rename from doc/07_planning/Global_Planner.md
rename to doc/planning/Global_Planner.md
index 10ef74d6..2d3e4ea9 100644
--- a/doc/07_planning/Global_Planner.md
+++ b/doc/planning/Global_Planner.md
@@ -7,34 +7,14 @@ After finishing that this node initiates the calculation of a trajectory based o
from preplanning_trajectory.py. In the end the computed trajectory and prevailing speed limits are published to the other components of this project (acting, decision making, ...).
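Publishing the computed trajectory could look roughly like the following rospy sketch. The topic name, frame id, and the `waypoints` input are illustrative assumptions, not the node's actual interface:

```python
import rospy
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped

def publish_trajectory(waypoints):
    """Publish a list of (x, y) waypoints as a nav_msgs/Path.

    Assumes it runs inside an already initialized ROS node; the
    topic and frame names below are placeholders.
    """
    pub = rospy.Publisher("trajectory", Path, queue_size=1, latch=True)
    path = Path()
    path.header.frame_id = "global"
    path.header.stamp = rospy.Time.now()
    for x, y in waypoints:
        pose = PoseStamped()
        pose.header = path.header
        pose.pose.position.x = x
        pose.pose.position.y = y
        path.poses.append(pose)
    pub.publish(path)
```

Speed limits would be published analogously on a separate topic.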
----
-
-## Author
-
-Samuel Kühnel
-
-## Date
-
-29.03.2024
-
-## Note
-
This component, and therefore most of its documentation, was taken from the previous project PAF22 (Authors: Simon Erlbacher, Niklas Vogel)

----
-
- [Global Planner](#global-planner)
-  - [Author](#author)
-  - [Date](#date)
-  - [Note](#note)
  - [Getting started](#getting-started)
  - [Description](#description)
  - [Inputs](#inputs)
  - [Outputs](#outputs)
  - [Testing](#testing)
-
-
----

## Getting started

diff --git a/doc/07_planning/Local_Planning.md b/doc/planning/Local_Planning.md
similarity index 91%
rename from doc/07_planning/Local_Planning.md
rename to doc/planning/Local_Planning.md
index 87480cba..22930f5d 100644
--- a/doc/07_planning/Local_Planning.md
+++ b/doc/planning/Local_Planning.md
@@ -2,24 +2,7 @@

**Summary:** This page contains the conceptual and theoretical explanations for the Local Planning component. For more technical documentation have a look at the other linked documentation files.

----
-
-## Author
-
-Samuel Kühnel
-
-## Date
-
-29.03.2024
-
-## Prerequisite
-
----
-
- [Local Planning](#local-planning)
-  - [Author](#author)
-  - [Date](#date)
-  - [Prerequisite](#prerequisite)
  - [Overview](#overview)
  - [Collision Check](#collision-check)
    - [Apply filters](#apply-filters)
@@ -31,14 +14,14 @@
  - [Selecting the target velocity](#selecting-the-target-velocity)
  - [Moving the trajectory](#moving-the-trajectory)
  - [Sources](#sources)
-
+
## Overview

The Local Planning component is responsible for evaluating short term decisions in the local environment of the ego vehicle. Some examples are collision avoidance, reducing speed or emergency brakes. The Local Planning in this project is divided into three components: Collision Check, Adaptive Cruise Control (ACC) and Motion Planning. The architecture can be seen below:

-![Planning_architecture.png](../00_assets/planning/Planning_architecture.png)
+![Planning_architecture.png](../assets/planning/Planning_architecture.png)

The theoretical concepts of each Local Planning component are explained below.

@@ -46,14 +29,14 @@
The Collision Check is the backbone of the Local Planning. Its task is to detect collisions with objects published by the vision node. The workflow when new objects are received looks like this:

-![collision_check.png](../00_assets/planning/collision_check.png)
+![collision_check.png](../assets/planning/collision_check.png)

### Apply filters

The following input is received from the perception: $[class, \min(abs(y)), \min(x)]$ in a $(n \times 3)$-matrix

Filtering steps:

-![vision_objects_filter_cc.png](../00_assets/planning/vision_objects_filter_cc.png)
+![vision_objects_filter_cc.png](../assets/planning/vision_objects_filter_cc.png)

We filter for the following traffic objects: Pedestrians, bicycles, bikes, cars, buses and trucks.
To filter oncoming traffic the $y$-distance is used as a deviation from the car's middle axis (+ left, - right).

@@ -109,7 +92,7 @@ The Motion Planning is the central control of the Local Planning. Controlling th

### Cornering Speed

-![Corner Speed - Full Trajectory.png](../00_assets/planning/plot_full_trajectory_1_degree.png)
+![Corner Speed - Full Trajectory.png](../assets/planning/plot_full_trajectory_1_degree.png)

The cornering speed gets calculated at the beginning of the scenario, when the full trajectory is received:

@@ -123,7 +106,7 @@ Lane changes are special, because you can drive them with normal speed even though
The target velocity is a combination of the ACC speed, the behavior speed and the cornering speed. Almost every time the minimal speed is chosen. Exceptions are overtaking and the parking maneuver.

-![Scenario](../00_assets/planning/three_scenarios.png)
+![Scenario](../assets/planning/three_scenarios.png)

In the first scenario on the left side the green ego vehicle chooses the ACC speed to not cause a collision with the red car. In the second scenario the car is waiting at the intersection and chooses the behavior speed (wait at intersection), while the ACC would say speed limit.

@@ -131,7 +114,7 @@ In the last scenario the car chooses the cornering speed to smoothly perform a 9
### Moving the trajectory

-![Overtake](../00_assets/planning/Overtake_car_trajectory.png)
+![Overtake](../assets/planning/Overtake_car_trajectory.png)

The trajectory gets moved a fixed amount of meters to the left if an overtake is triggered.

@@ -146,7 +129,7 @@ rotation_adjusted = Rotation.from_euler('z', self.current_heading +
After generating our target rotation we generate an offset vector with the number of meters to move our points as x-value. Then we rotate this vector and add it to the desired waypoint (see red vector in figure below).

-![Vector math](../00_assets/planning/vector_calculation.png)
+![Vector math](../assets/planning/vector_calculation.png)

```python
offset = np.array([offset_meters, 0, 0])
```

diff --git a/doc/07_planning/Preplanning.md b/doc/planning/Preplanning.md
similarity index 92%
rename from doc/07_planning/Preplanning.md
rename to doc/planning/Preplanning.md
index 4ed557b9..4475c290 100644
--- a/doc/07_planning/Preplanning.md
+++ b/doc/planning/Preplanning.md
@@ -2,26 +2,7 @@

**Summary:** Preplanner holds the logic to create a trajectory out of an OpenDrive Map with the corresponding road options

----
-
-## Author
-
-Authors: Simon Erlbacher, Niklas Vogel
-
-## Date
-
-29.03.2023
-
-## Note
-
-The Preplanning component was taken from the previous project PAF22.
-
----
-
- [Preplanning](#preplanning)
-  - [Author](#author)
-  - [Date](#date)
-  - [Note](#note)
  - [Getting started](#getting-started)
  - [Road option concept](#road-option-concept)
  - [Road information](#road-information)
@@ -29,9 +10,6 @@
  - [Road interpolation](#road-interpolation)
  - [How to use the implementation](#how-to-use-the-implementation)
  - [Sources](#sources)
-
-
----

## Getting started

@@ -42,7 +20,7 @@ No extra installation needed.
The leaderboard provides target points and instructions. Every target point contains an appropriate instruction.

-![img.png](../00_assets/road_option.png)
+![img.png](../assets/road_option.png)

We need to cover the following instructions for intersections:

@@ -65,7 +43,7 @@ clipping of town 12. It visualizes the agent (red triangle) and the first target
from the leaderboard. It also shows the final trajectory on the right side. The picture covers a "turn right" and a "lane change left".
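A toy sketch of how the target point / instruction pairs described above could be consumed. The enum mirrors the leaderboard's road options listed in the navigation-data research; the handler split and all names are illustrative assumptions:

```python
from enum import Enum

# Illustrative stand-in for the leaderboard's RoadOption values.
class RoadOption(Enum):
    LEFT = 1
    RIGHT = 2
    STRAIGHT = 3
    LANEFOLLOW = 4
    CHANGELANELEFT = 5
    CHANGELANERIGHT = 6

# Instructions that must be handled as intersection maneuvers.
INTERSECTION_OPTIONS = {RoadOption.LEFT, RoadOption.RIGHT, RoadOption.STRAIGHT}

def split_plan(global_plan):
    """Yield (kind, option, point) triples in driving order."""
    for point, option in global_plan:
        kind = "intersection" if option in INTERSECTION_OPTIONS else "lane"
        yield kind, option, point
```

Each intersection entry would then be matched against the junction data from the map to pick the correct way through.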
-![img.png](../00_assets/road_options_concept.png)
+![img.png](../assets/road_options_concept.png)

## Road information

@@ -85,7 +63,7 @@ only holds id values which have to be solved with the carla API. Also the name o
That is why we would need to get the information for every traffic sign id from carla. This would conflict with the leaderboard requirements. We are --not-- allowed to use ground truth information from the game engine.

-![img.png](../00_assets/Road0_cutout.png)
+![img.png](../assets/Road0_cutout.png)

The picture shows the clipping of Road 0 from the leaderboard town 12.

@@ -99,7 +77,7 @@ If the road would be part of a junction, there would be a id value greater than
A junction manages colliding roads. Junctions only exist when roads intersect.

-![img.png](../00_assets/junction.png)
+![img.png](../assets/junction.png)

The picture above shows an intersection of roads. All possible ways through this intersection have to be covered. The picture shows a clipping of town 12.

@@ -107,7 +85,7 @@ To view an xodr file we used the following [viewer](https://odrviewer.io/).
Very helpful tool to get a better understanding of an underlying town and to debug the trajectory.

-![img.png](../00_assets/intersection_2.png)
+![img.png](../assets/intersection_2.png)

The picture above shows an intersection. The agent is visualized with the red triangle and wants to drive through the intersection. It has three options, which are shown with the orange lines. The yellow point shows a target point

@@ -121,12 +99,12 @@ follow to cross the intersection.
Every road has geometry information. This information is important to interpolate the road correctly.

-![img.png](../00_assets/reference_xodr.png)
+![img.png](../assets/reference_xodr.png)

The picture shows the clipping of a road with a curve. The road contains "line" segments and "arc curvature" segments. We have to interpolate these segments in the provided order to reconstruct the reference line of a road.

-![img.png](../00_assets/reference.png)
+![img.png](../assets/reference.png)

The picture above shows the reference line with its different segments.

diff --git a/doc/07_planning/README.md b/doc/planning/README.md
similarity index 76%
rename from doc/07_planning/README.md
rename to doc/planning/README.md
index db33eda6..378d94bd 100644
--- a/doc/07_planning/README.md
+++ b/doc/planning/README.md
@@ -1,23 +1,5 @@
# Planning Wiki

----
-
-## Structure
-
-Planning wiki contains different parts:
-
-
-
-- [Planning Wiki](#planning-wiki)
-  - [Structure](#structure)
-  - [Overview](#overview)
-    - [Preplanning](#preplanning)
-    - [Global plan](#global-plan)
-    - [Decision making](#decision-making)
-    - [Local Planning](#local-planning)
-
----
-
## Overview

### [Preplanning](./Preplanning.md)

@@ -34,7 +16,7 @@ After finishing that this node initiates the calculation of a trajectory based o
from preplanning_trajectory.py. In the end the computed trajectory and prevailing speed limits are published to the other components of this project (acting, decision making, ...).

-![img.png](../00_assets/Global_Plan.png)
+![img.png](../assets/Global_Plan.png)

### [Decision making](./Behavior_tree.md)

The decision making collects most of the available information of the other components and evaluates the information. All possible traffic scenarios are covered in this component.
The decision making uses a so-called decision tree, which is easy to adapt and to expand.
-![Simple Tree](../00_assets/planning/simple_final_tree.png)
+![Simple Tree](../assets/planning/simple_final_tree.png)

### [Local Planning](./Local_Planning.md)

The Local Planning component is responsible for evaluating short term decisions in the local environment of the ego vehicle. It contains components responsible for detecting collisions and reacting, e.g. by lowering speed. The local planning also executes behaviors, e.g. changing the trajectory for an overtake.

-![Overtake](../00_assets/planning/Overtake_car_trajectory.png)
+![Overtake](../assets/planning/Overtake_car_trajectory.png)

diff --git a/doc/07_planning/Unstuck_Behavior.md b/doc/planning/Unstuck_Behavior.md
similarity index 87%
rename from doc/07_planning/Unstuck_Behavior.md
rename to doc/planning/Unstuck_Behavior.md
index 025a46cf..99c408d5 100644
--- a/doc/07_planning/Unstuck_Behavior.md
+++ b/doc/planning/Unstuck_Behavior.md
@@ -2,23 +2,8 @@

**Summary:** This file explains the unstuck behavior used as a fallback to recover from stuck situations.

----
-
-## Author
-
-Robert Fischer
-
-## Date
-
-01.04.2024
-
----
-
- [Unstuck Behavior](#unstuck-behavior)
-  - [Author](#author)
-  - [Date](#date)
  - [Explanation](#explanation)
-
## Explanation

@@ -69,5 +54,5 @@ Files influenced by this behavior are:
- [motion_planning.py](/code/planning/src/local_planner/motion_planning.py), for the target_speed and overtake
- [behavior_speed.py](/code/planning/src/behavior_agent/behaviours/behavior_speed.py), for the target_speed
- Acting:
-  - [vehicle_controller.py](/doc/05_acting/04_vehicle_controller.md), because of driving backwards without steering
-  - [velocity_controller.py](/doc/05_acting/02_velocity_controller.md), because of the sepcial -3 target_speed case
+  - [vehicle_controller.py](/doc/acting/vehicle_controller.md), because of driving backwards without steering
+  - [velocity_controller.py](/doc/acting/velocity_controller.md), because of the special -3 target_speed case

diff --git a/doc/07_planning/motion_planning.md b/doc/planning/motion_planning.md
similarity index 93%
rename from doc/07_planning/motion_planning.md
rename to doc/planning/motion_planning.md
index 125e4ebe..dd05ed42 100644
--- a/doc/07_planning/motion_planning.md
+++ b/doc/planning/motion_planning.md
@@ -3,33 +3,13 @@

**Summary:** [motion_planning.py](.../code/planning/local_planner/src/motion_planning.py):
The motion planning is responsible for collecting all the speeds from the different components and choosing the optimal one to be forwarded into the acting. It is also capable of changing the trajectory for an overtaking maneuver.

----
-
-## Author
-
-Julius Miller
-
-## Date
-
-31.03.2023
-
-## Prerequisite
-
----
-
- [Motion Planning](#motion-planning)
-  - [Author](#author)
-  - [Date](#date)
-  - [Prerequisite](#prerequisite)
  - [Overview](#overview)
  - [Component](#component)
  - [ROS Data Interface](#ros-data-interface)
    - [Subscribed Topics](#subscribed-topics)
    - [Published Topics](#published-topics)
  - [Node Creation + Running Tests](#node-creation--running-tests)
-
-
----

## Overview

diff --git a/doc/07_planning/01_py_trees.md b/doc/planning/py_trees.md
similarity index 75%
rename from doc/07_planning/01_py_trees.md
rename to doc/planning/py_trees.md
index 94cab5db..4e12d604 100644
--- a/doc/07_planning/01_py_trees.md
+++ b/doc/planning/py_trees.md
@@ -2,32 +2,13 @@

**Summary:** py_trees is a Python library used to generate and inspect decision trees. It has a very clear structure and is easy to understand, so it is used in this project.
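A minimal py_trees behaviour could look like the sketch below. It only illustrates the lifecycle hooks (`initialise()`, `update()`, `terminate()`) named in the Behavior Tree documentation above; the behaviour name and tick logic are invented for the example:

```python
import py_trees

class WaitAtIntersection(py_trees.behaviour.Behaviour):
    """Toy behaviour demonstrating the py_trees lifecycle."""

    def __init__(self, name="WaitAtIntersection"):
        super().__init__(name)
        self.ticks = 0

    def initialise(self):
        # Called each time the behaviour is (re-)entered.
        self.ticks = 0

    def update(self):
        # Called on every tick; must return a py_trees status.
        self.ticks += 1
        if self.ticks < 3:
            return py_trees.common.Status.RUNNING
        return py_trees.common.Status.SUCCESS

    def terminate(self, new_status):
        # Called on completion or interruption; clean up here.
        pass
```

In the real tree such behaviours are composed under selectors and sequences, as the intersection example above shows.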
----
-
-## Author
-
-Josef Kircher
-
-## Date
-
-31.01.2023
-
-## Note
-
-This documentation was taken from the previous project PAF22.
-
----
-
- [Pytrees](#pytrees)
-  - [Author](#author)
-  - [Date](#date)
-  - [Note](#note)
  - [Getting started](#getting-started)
  - [What is Pytrees?](#what-is-pytrees)
  - [Examples](#examples)
  - [Common commands](#common-commands)
  - [Sources](#sources)
-
+
## Getting started

Pytrees is integrated in this project's dockerfile, so no setup is required.

@@ -45,10 +26,9 @@ There is a very simple example for pytrees.
Run:

-1. call `b5 update` to update docker container
-2. call `b5 run` to start container
-3. in a second shell call `b5 shell`
-4. run `py-trees-demo-behaviour-lifecycle` to execute the example
+1. Start the dev container for the agent
+2. Attach a shell to the container
+3. Run `py-trees-demo-behaviour-lifecycle` to execute the example

## Common commands

Run rqt visualization for behaviour tree

`rqt --standalone rqt_py_trees.behaviour_tree.RosBehaviourTree`

-![img.png](../00_assets/behaviour_tree.png)
+![img.png](../assets/behaviour_tree.png)

Inspect data written to the behaviour tree

diff --git a/doc/research/README.md b/doc/research/README.md
new file mode 100644
index 00000000..f4703cc3
--- /dev/null
+++ b/doc/research/README.md
@@ -0,0 +1,8 @@
+# Research
+
+This folder contains the research of each individual group at the start of the project.
+
+The research is structured in folders for each year:
+
+- [PAF22](./paf22/)
+- [PAF23](./paf23/)

diff --git a/doc/03_research/01_acting/01_basics_acting.md b/doc/research/paf22/acting/basics_acting.md
similarity index 73%
rename from doc/03_research/01_acting/01_basics_acting.md
rename to doc/research/paf22/acting/basics_acting.md
index deeab2e1..d692baea 100644
--- a/doc/03_research/01_acting/01_basics_acting.md
+++ b/doc/research/paf22/acting/basics_acting.md
@@ -2,55 +2,59 @@

**Summary:** On this page you can find the results of the basic research on acting.

----
-
-## Authors
-
-Gabriel Schwald, Julian Graf
-
-### Date
-
-14.11.2022
-
----
-[[TOC]]
+- [Basic research acting](#basic-research-acting)
+  - [Objective](#objective)
+  - [Solutions from old PAF projects](#solutions-from-old-paf-projects)
+    - [Paf 20/1](#paf-201)
+    - [Paf 21/1](#paf-211)
+    - [Paf 20/2 and Paf 21/2](#paf-202-and-paf-212)
+  - [Lateral control](#lateral-control)
+    - [Pure Pursuit](#pure-pursuit)
+    - [Stanley](#stanley)
+    - [MPC (Model Predictive Control) / receding horizon control](#mpc-model-predictive-control--receding-horizon-control)
+    - [SMC (sliding mode control)](#smc-sliding-mode-control)
+  - [Velocity control](#velocity-control)
+  - [Interface](#interface)
+  - [Limits](#limits)
+  - [Visualization](#visualization)
+  - [Additional functionality (open for discussion)](#additional-functionality-open-for-discussion)

## Objective

The job of this domain is to translate a preplanned trajectory into actual steering controls for the vehicle.

-* safety:
-  * never exceeding vehicle limits
-  * never exceeding speed limits
-  * never leaf path
-* driving comfort?
+- safety (see the sketch below):
+  - never exceeding vehicle limits
+  - never exceeding speed limits
+  - never leave the path
+- driving comfort?
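The safety goals above could translate into a final clamp on every outgoing command before it is published. This is only a minimal sketch assuming CARLA's control ranges (throttle in [0, 1], steer in [-1, 1]); all names and the crude speed guard are illustrative:

```python
def clamp_command(throttle, steer, speed, speed_limit, max_steer=1.0):
    """Clamp raw controller outputs to safe ranges (illustrative limits).

    CARLA expects throttle in [0, 1] and steer in [-1, 1]; cutting
    the throttle once the speed limit is reached is a crude guard
    against exceeding it.
    """
    throttle = min(max(throttle, 0.0), 1.0)
    steer = min(max(steer, -max_steer), max_steer)
    if speed >= speed_limit:
        throttle = 0.0
    return throttle, steer
```

Keeping the vehicle on the path is the job of the lateral controllers discussed below and cannot be enforced by a simple clamp.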
## Solutions from old PAF projects

### [Paf 20/1](https://github.com/ll7/psaf1/tree/master/psaf_ros/psaf_steering)

-* [carla_ackermann_control](https://carla.readthedocs.io/projects/ros-bridge/en/latest/carla_ackermann_control/) modified for [twist-msgs](http://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/Twist.html)
-* input: [twist-msgs](http://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/Twist.html) (for velocity)
-* velocity control: PID
-* lateral control: PD (heading error)
+- [carla_ackermann_control](https://carla.readthedocs.io/projects/ros-bridge/en/latest/carla_ackermann_control/) modified for [twist-msgs](http://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/Twist.html)
+- input: [twist-msgs](http://docs.ros.org/en/noetic/api/geometry_msgs/html/msg/Twist.html) (for velocity)
+- velocity control: PID
+- lateral control: PD (heading error)

### [Paf 21/1](https://github.com/ll7/paf21-1/wiki/Vehicle-Controller)

-* input: waypoints
-* curve detection: returns distance to next curve
-* calculation of max curve speed as sqrt(friction_coefficient x gravity_accel x radius)
-* in Curve: [naive Controller](###Pure_Pursuit)
-* on straights: [Stanley Controller](###Stanley)
-* interface to rosbridge
+- input: waypoints
+- curve detection: returns distance to next curve
+- calculation of max curve speed as sqrt(friction_coefficient x gravity_accel x radius)
+- in curves: [naive controller](###Pure_Pursuit)
+- on straights: [Stanley controller](###Stanley)
+- interface to rosbridge

### [Paf 20/2](https://github.com/ll7/psaf2) and [Paf 21/2](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_actor#readme)

-* input: odometry(position and velocity with uncertainty), local path
-* lateral: [Stanley Controller](###Stanley)
-* speed controller: pid
-* ACC (Adaptive Cruise Control): (speed, distance) -> PID
-* Unstuck-Routine (drive backwards)
-* Emergency Modus: fastest possible braking ([Tests](https://github.com/ll7/paf21-2/blob/main/docs/paf_actor/backwards/braking.md) -> handbrake with throttle, 30° steering and reverse)
+- input: odometry (position and velocity with uncertainty), local path
+- lateral: [Stanley controller](###Stanley)
+- speed controller: PID
+- ACC (Adaptive Cruise Control): (speed, distance) -> PID
+- Unstuck routine (drive backwards)
+- Emergency mode: fastest possible braking ([tests](https://github.com/ll7/paf21-2/blob/main/docs/paf_actor/backwards/braking.md) -> handbrake with throttle, 30° steering and reverse)

## Lateral control

@@ -67,12 +71,12 @@ The steering angle $\delta$ is defined as the angle of the front wheel to a line
This angle $\delta$ can also be defined as $tan(\delta) = L/R$ with $L$ as the wheelbase and $R$ the radius from the reference point (rear axle) to the Instantaneous Center of Rotation (ICR). Due to the bicycle model we can calculate $R = \frac{L}{tan(\delta)}$.

-![Bicycle Model with ICR](../../00_assets/research_assets/bicyclegeometry.png)
+![Bicycle Model with ICR](../../assets/research_assets/bicyclegeometry.png)

*source: [[2]](https://medium.com/roboquest/understanding-geometric-path-tracking-algorithms-stanley-controller-25da17bcc219)*

We now try to aim the circular arc to intersect with a point on our trajectory. This target point is always a defined distance (look-ahead distance $l_d$) away from our reference point (dangling carrot).
This leads to the following relation:

-![Dangling carrot geometry](../../00_assets/research_assets/danglingcarrotgeometry.png)
+![Dangling carrot geometry](../../assets/research_assets/danglingcarrotgeometry.png)

*source: [[2]](https://medium.com/roboquest/understanding-geometric-path-tracking-algorithms-stanley-controller-25da17bcc219)*

$\frac{l_d}{\sin(\alpha)} = 2R$, where $\alpha$ is the current heading error. Combining the two equations leads to our desired steering angle:

$$
\delta(t) = \arctan\left(\frac{2L \sin(\alpha)}{K_d v}\right)
$$

-* simple controller
-* ignores dynamic forces
-* assumes no-slip condition
-* possible improvement: vary the look-ahead distance based on vehicle velocity
-* not really suited for straights, because ICR moves towards infinity this case
+- simple controller
+- ignores dynamic forces
+- assumes no-slip condition
+- possible improvement: vary the look-ahead distance based on vehicle velocity
+- not really suited for straights, because the ICR moves towards infinity in this case

### Stanley

The Stanley controller, named after an autonomous offroad race car, takes the front axle as a reference, while still using the bicycle model. In addition to looking at the heading error $\psi$, similar to what pure pursuit does, Stanley also looks at the cross track error $e_e$.
The cross track error $e_e$ is defined as the distance between the reference point and the closest point on our trajectory.

-![Stanley error with heading and cross track error](../../00_assets/research_assets/stanleyerror.png)
+![Stanley error with heading and cross track error](../../assets/research_assets/stanleyerror.png)

*source: [[2]](https://medium.com/roboquest/understanding-geometric-path-tracking-algorithms-stanley-controller-25da17bcc219)*

The first part of our steering angle tries to correct for this error $\arctan(\frac{k_e e_e}{k_v v})$ while the second part just corrects for our heading error $\psi$.

@@ -115,23 +119,23 @@ With $k_e$ and $k_v$ being tuneable parameters for cross tracking error and spee
The basic idea of MPC is to model the future behavior of the vehicle and compute an optimal control input that minimizes an a priori defined cost functional.

-![MPC Controller](../../00_assets/research_assets/mpc.png)
+![MPC Controller](../../assets/research_assets/mpc.png)

*source: [[5]](https://dingyan89.medium.com/three-methods-of-vehicle-lateral-control-pure-pursuit-stanley-and-mpc-db8cc1d32081)*

-* cost function can be designed to account for driving comfort
+- cost function can be designed to account for driving comfort

### [SMC](https://en.wikipedia.org/wiki/Sliding_mode_control) (sliding mode control)

SMC systems are designed to drive the system states onto a particular surface in the state space, named the sliding surface. Once the sliding surface is reached, sliding mode control keeps the states in a close neighborhood of the sliding surface.
Real implementations of sliding mode control approximate theoretical behavior with a high-frequency and generally non-deterministic switching control signal that causes the system to chatter.
-![chattering](../../00_assets/research_assets/chattering.gif)
+![chattering](../../assets/research_assets/chattering.gif)

*source: [[9]](https://ieeexplore.ieee.org/document/1644542)*

-* simple
-* robust
-* stabile
-* disadvantage: chattering -> controller is ill-suited for this application
+- simple
+- robust
+- stable
+- disadvantage: chattering -> controller is ill-suited for this application

Sources:

@@ -155,20 +159,20 @@ PID: already implemented in [ROS](http://wiki.ros.org/pid) (and [CARLA](https:/
Further information:

-*
+-

## Interface

**subscribes** to:

-* current position
+- current position
([nav_msgs/Odometry Message](http://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Odometry.html))
from Perception?
-* path ([nav_msgs/Path Message](https://docs.ros.org/en/api/nav_msgs/html/msg/Path.html)) or target point ([geometry_msgs/Pose.msg](https://docs.ros.org/en/api/geometry_msgs/html/msg/Pose.html))
-* (maximal) velocity to drive
-* (distance and speed of vehicle to follow)
-* (commands for special routines)
-* (Distance to obstacles for turning/min turning radius)
-* (Road conditions)
+- path ([nav_msgs/Path Message](https://docs.ros.org/en/api/nav_msgs/html/msg/Path.html)) or target point ([geometry_msgs/Pose.msg](https://docs.ros.org/en/api/geometry_msgs/html/msg/Pose.html))
+- (maximal) velocity to drive
+- (distance and speed of vehicle to follow)
+- (commands for special routines)
+- (Distance to obstacles for turning/min turning radius)
+- (Road conditions)

**publishes**: [CarlaEgoVehicleControl.msg](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_msgs/#carlaegovehiclecontrolmsg) or [ackermann_msgs/AckermannDrive.msg](https://docs.ros.org/en/api/ackermann_msgs/html/msg/AckermannDrive.html)

In the [CarlaEgoVehicleInfo.msg](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_msgs/#carlaegovehicleinfomsg) we get a [CarlaEgoVehicleInfoWheel.msg](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_msgs/#carlaegovehicleinfowheelmsg) which provides us with

-* tire_friction (a scalar value that indicates the friction of the wheel)
-* max_steer_angle (the maximum angle in degrees that the wheel can steer)
-* max_brake_torque (the maximum brake torque in Nm)
-* max_handbrake_torque (the maximum handbrake torque in Nm)
+- tire_friction (a scalar value that indicates the friction of the wheel)
+- max_steer_angle (the maximum angle in degrees that the wheel can steer)
+- max_brake_torque (the maximum brake torque in Nm)
+- max_handbrake_torque (the maximum handbrake torque in Nm)

The max curve speed can be calculated as sqrt(friction_coefficient * gravity_accel * curve_radius).
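A worked form of this formula, using the tire_friction value from the wheel info above; the example numbers are illustrative:

```python
import math

def max_curve_speed(tire_friction, curve_radius, gravity_accel=9.81):
    """v_max = sqrt(mu * g * r), the speed bound before lateral slip."""
    return math.sqrt(tire_friction * gravity_accel * curve_radius)

# Example: a friction coefficient of 1.0 and a 50 m curve radius give
# sqrt(1.0 * 9.81 * 50), which is roughly 22 m/s (~80 km/h).
print(round(max_curve_speed(1.0, 50.0), 1))  # 22.1
```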
@@ -193,12 +197,12 @@ For debugging purposes the vehicle's path can be visualized using [carlaviz](http

## Additional functionality (open for discussion)

-* ACC (Adaptive Cruise Control): reduces speed to keep set distance to vehicle in front (see also [cruise control technology review](https://www.sciencedirect.com/science/article/pii/S004579069700013X),
[a comprehensive review of the development of adaptive cruise control systems](https://www.researchgate.net/publication/245309633_A_comprehensive_review_of_the_development_of_adaptive_cruise_control_systems),
[towards an understanding of adaptive cruise control](https://www.sciencedirect.com/science/article/pii/S0968090X0000022X),
[Encyclopedia of Systems and Control](https://dokumen.pub/encyclopedia-of-systems-and-control-2nd-ed-2021-3030441830-9783030441838.html))
-* emergency braking: stops the car as fast as possible
-* emergency braking assistant: uses Lidar as proximity sensor and breaks if it would come to a collision without breaking
-* parallel parking: executes [fixed parking sequence](https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5705869) to parallel park vehicle in given parking space
-* U-Turn: performs u-turn
-* Driving backwards: might a need different controller configuration
-* Unstuck routine: performs fixed routine (e.g. driving backwards) if the car hasn't moved in a while
+- ACC (Adaptive Cruise Control): reduces speed to keep set distance to vehicle in front (see also [cruise control technology review](https://www.sciencedirect.com/science/article/pii/S004579069700013X),
[a comprehensive review of the development of adaptive cruise control systems](https://www.researchgate.net/publication/245309633_A_comprehensive_review_of_the_development_of_adaptive_cruise_control_systems),
[towards an understanding of adaptive cruise control](https://www.sciencedirect.com/science/article/pii/S0968090X0000022X),
[Encyclopedia of Systems and Control](https://dokumen.pub/encyclopedia-of-systems-and-control-2nd-ed-2021-3030441830-9783030441838.html))
+- emergency braking: stops the car as fast as possible
+- emergency braking assistant: uses Lidar as a proximity sensor and brakes if a collision would occur without braking
+- parallel parking: executes [fixed parking sequence](https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5705869) to parallel park vehicle in given parking space
+- U-Turn: performs u-turn
+- Driving backwards: might need a different controller configuration
+- Unstuck routine: performs fixed routine (e.g. driving backwards) if the car hasn't moved in a while

diff --git a/doc/03_research/01_acting/02_implementation_acting.md b/doc/research/paf22/acting/implementation_acting.md
similarity index 61%
rename from doc/03_research/01_acting/02_implementation_acting.md
rename to doc/research/paf22/acting/implementation_acting.md
index 0d7a216e..83f89e51 100644
--- a/doc/03_research/01_acting/02_implementation_acting.md
+++ b/doc/research/paf22/acting/implementation_acting.md
@@ -2,30 +2,14 @@

**Summary:** On this page you can find the results of the basic research on acting summed up into resulting requirements and functions that were already agreed upon.

----
+This document sums up all functions already agreed upon in [#24](https://github.com/ll7/paf22/issues/24) regarding [acting](../acting/acting.md) that could be implemented in the next sprint.
-## Authors
-
-Gabriel Schwald
-
-### Date
-
-20.11.2022
-
----
-
-
-* [Requirements and challenges for an acting implementation](#requirements-and-challenges-for-an-acting-implementation)
-  * [Authors](#authors)
-  * [Date](#date)
-  * [Planned basic implementation of the Acting domain](#planned-basic-implementation-of-the-acting-domain)
-  * [List of basic functions](#list-of-basic-functions)
-  * [List of Inputs/Outputs](#list-of-inputsoutputs)
-  * [Challenges](#challenges)
-  * [Next steps](#next-steps)
-
-
-This document sums up all functions already agreed upon in [#24](https://github.com/ll7/paf22/issues/24) regarding [acting](../01_acting/01_acting.md), that could be implemented in the next sprint.
+- [Requirements and challenges for an acting implementation](#requirements-and-challenges-for-an-acting-implementation)
+  - [Planned basic implementation of the Acting domain](#planned-basic-implementation-of-the-acting-domain)
+  - [List of basic functions](#list-of-basic-functions)
+  - [List of Inputs/Outputs](#list-of-inputsoutputs)
+  - [Challenges](#challenges)
+  - [Next steps](#next-steps)

## Planned basic implementation of the Acting domain

@@ -36,34 +20,34 @@ These goals lead to the following requirements:

## List of basic functions

-* Longitudinal control
-  * PID controller
-* Lateral control
-  * Pure Pursuit controller
-  * Stanley controller
+- Longitudinal control
+  - PID controller
+- Lateral control
+  - Pure Pursuit controller
+  - Stanley controller

## List of Inputs/Outputs

-* Subscribes to:
-  * [nav_msgs/Odometry Message](http://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Odometry.html) : to get the current position and heading
-  * [nav_msgs/Path Message](https://docs.ros.org/en/api/nav_msgs/html/msg/Path.html) : to get the current trajectory
-  * emergency breaking msg : to initiate emergency breaking
-  * speed limit msg : to get the maximum velocity
-* Publishes:
-  * [CarlaEgoVehicleControl.msg](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_msgs/#carlaegovehiclecontrolmsg) : to actually control the vehicles throttle, steering, ...
+- Subscribes to:
+  - [nav_msgs/Odometry Message](http://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Odometry.html) : to get the current position and heading
+  - [nav_msgs/Path Message](https://docs.ros.org/en/api/nav_msgs/html/msg/Path.html) : to get the current trajectory
+  - emergency braking msg : to initiate emergency braking
+  - speed limit msg : to get the maximum velocity
+- Publishes:
+  - [CarlaEgoVehicleControl.msg](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_msgs/#carlaegovehiclecontrolmsg) : to actually control the vehicle's throttle, steering, ...

## Challenges

A short list of challenges for the implementation of a basic acting domain and how these could be tackled based on the requirements mentioned above.
-* The vehicle needs to know its own position => [nav_msgs/Odometry Message](http://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Odometry.html) or [GNSS](https://carla.readthedocs.io/en/latest/ref_sensors/#gnss-sensor) sensor
-* The vehicle needs to know its own velocity => can be calculated from last/current position and time or the [speedometer](https://leaderboard.carla.org/#map-track) pseudosensor can be used
-* The vehicle needs to know its planned trajectory => [nav_msgs/Path Message](https://docs.ros.org/en/api/nav_msgs/html/msg/Path.html) this trajectory may need to be updated to accommodate obstacles
-* Longitudinal control => a simple PID controller should suffice
-* lateral control => Pure Pursuit as well as Stanley controller should be implemented, following tests can show, where to use each controller.
-* additional features:
-  * emergency breaking => this command is supposed to bypass longitudinal and lateral controllers (and should use the bug discoverd by [paf21-2](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_actor#bugabuses))
-  * additional functionality mostly should be added here ...
+- The vehicle needs to know its own position => [nav_msgs/Odometry Message](http://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Odometry.html) or [GNSS](https://carla.readthedocs.io/en/latest/ref_sensors/#gnss-sensor) sensor
+- The vehicle needs to know its own velocity => can be calculated from last/current position and time, or the [speedometer](https://leaderboard.carla.org/#map-track) pseudosensor can be used
+- The vehicle needs to know its planned trajectory => [nav_msgs/Path Message](https://docs.ros.org/en/api/nav_msgs/html/msg/Path.html); this trajectory may need to be updated to accommodate obstacles
+- Longitudinal control => a simple PID controller should suffice
+- Lateral control => Pure Pursuit as well as Stanley controller should be implemented; subsequent tests can show where to use each controller.
+- additional features:
+  - emergency braking => this command is supposed to bypass longitudinal and lateral controllers (and should use the bug discovered by [paf21-2](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_actor#bugabuses))
+  - additional functionality mostly should be added here ...

## Next steps

diff --git a/doc/03_research/02_perception/02_basics.md b/doc/research/paf22/perception/basics.md
similarity index 87%
rename from doc/03_research/02_perception/02_basics.md
rename to doc/research/paf22/perception/basics.md
index b5f921dc..d9f2bcc3 100644
--- a/doc/03_research/02_perception/02_basics.md
+++ b/doc/research/paf22/perception/basics.md
@@ -1,11 +1,29 @@
# Basic research perception

-The perception is responsible for the efficient conversion of raw sensor and map data
+**Summary:** The perception is responsible for the efficient conversion of raw sensor and map data
into a useful environment representation that can be used by the planning for further processing.
This includes the classification and localization of relevant entities in traffic and also the
preparation of this data to enable fast processing in the planning layer.
+- [Basic research perception](#basic-research-perception) + - [Interfaces](#interfaces) + - [Input](#input) + - [Output](#output) + - [Environment](#environment) + - [What objects have to be detected?](#what-objects-have-to-be-detected) + - [Special case traffic light (PAF21-1)](#special-case-traffic-light-paf21-1) + - [Algorithms for classification/localization](#algorithms-for-classificationlocalization) + - [Prediction](#prediction) + - [Map data](#map-data) + - [Limitations of the sensors and perception](#limitations-of-the-sensors-and-perception) + - [LIDAR](#lidar) + - [RADAR](#radar) + - [Camera](#camera) + - [Training data](#training-data) + - [Classification of situations](#classification-of-situations) + - [Combination of 2D camera data and 3D RADAR/LIDAR data](#combination-of-2d-camera-data-and-3d-radarlidar-data) + ## Interfaces ### Input diff --git a/doc/03_research/02_perception/03_first_implementation_plan.md b/doc/research/paf22/perception/first_implementation_plan.md similarity index 74% rename from doc/03_research/02_perception/03_first_implementation_plan.md rename to doc/research/paf22/perception/first_implementation_plan.md index 77498f1c..8bc88aba 100644 --- a/doc/03_research/02_perception/03_first_implementation_plan.md +++ b/doc/research/paf22/perception/first_implementation_plan.md @@ -1,44 +1,28 @@ # First Implementation Plan -This document shows the initial ideas for the implementation of the perception module. +**Summary:** This document shows the initial ideas for the implementation of the perception module. It includes the various detection and classification modules that are necessary for an efficient and reliable workflow. ---- - -## Authors - -Marco Riedenauer - -## Date - -26.11.2022 - ---- - -* [First Implementation Plan](#first-implementation-plan) - * [Authors](#authors) - * [Date](#date) - * [Overview](#overview) - * [Panoptic Segmentation](#panoptic-segmentation) - * [Things and Stuff](#things-and-stuff) - * [Things](#things) - * [Stuff](#stuff) - * [Segmentation Overview](#segmentation-overview) - * [Image Panoptic Segmentation](#image-panoptic-segmentation) - * [LIDAR Panoptic Segmentation](#lidar-panoptic-segmentation) - * [Position Validation](#position-validation) - * [Obstacle Detection and Object Classification](#obstacle-detection-and-object-classification) - * [Lane Detection](#lane-detection) - * [Traffic Light Detection](#traffic-light-detection) - * [Traffic Sign Detection](#traffic-sign-detection) - * [Prediction](#prediction) - - ---- +- [First Implementation Plan](#first-implementation-plan) + - [Overview](#overview) + - [Panoptic Segmentation](#panoptic-segmentation) + - [Things and Stuff](#things-and-stuff) + - [Things](#things) + - [Stuff](#stuff) + - [Segmentation Overview](#segmentation-overview) + - [Image Panoptic Segmentation](#image-panoptic-segmentation) + - [LIDAR Panoptic Segmentation](#lidar-panoptic-segmentation) + - [Position Validation](#position-validation) + - [Obstacle Detection and Object Classification](#obstacle-detection-and-object-classification) + - [Lane Detection](#lane-detection) + - [Traffic Light Detection](#traffic-light-detection) + - [Traffic Sign Detection](#traffic-sign-detection) + - [Prediction](#prediction) + - [Possible Issues/Milestones](#possible-issuesmilestones) ## Overview -![Implementation Plan Perception](../../00_assets/implementation_plan_perception.jpg) +![Implementation Plan Perception](../../assets/implementation_plan_perception.jpg) --- @@ -58,14 +42,14 @@ Stuff is the term used to 
define objects that don’t have proper geometry but a There are three different kinds of image segmentation: -* **Semantic Segmentation**: \ +- **Semantic Segmentation**: \ Classification of every pixel or point in an image or LIDAR map into different classes (car, person, street, ...) -* **Instance Segmentation**: \ +- **Instance Segmentation**: \ Detection of the different instances of things. -* **Panoptic Segmentation**: \ +- **Panoptic Segmentation**: \ Combination of semantic segmentation and instance segmentation. Detection of stuff plus instances of things. -![Segmentation](../../00_assets/segmentation.png) +![Segmentation](../../assets/segmentation.png) [Source](https://www.v7labs.com/blog/panoptic-segmentation-guide) ### Image Panoptic Segmentation @@ -129,11 +113,11 @@ As classification net I would recommend the [net implemented by PAF21-1](https:/ Possible states are: -* green -* orange -* red -* off -* backside +- green +- orange +- red +- off +- backside --- @@ -159,11 +143,11 @@ No implementation plan yet. ## Possible Issues/Milestones -* Implement/Adapt panoptic segmentation model (EfficientPS) -* (Implement/Adapt) LIDAR panoptic segmentation model (EfficientLPS) -* Choose datasets for training -* Generate own training data for fine-tuning -* Implement classification net for traffic light/sign classification -* Find ways for lane detection -* Find solutions/implementations for the projection of LIDAR, Radar and image data -* Position validation necessary? +- Implement/Adapt panoptic segmentation model (EfficientPS) +- (Implement/Adapt) LIDAR panoptic segmentation model (EfficientLPS) +- Choose datasets for training +- Generate own training data for fine-tuning +- Implement classification net for traffic light/sign classification +- Find ways for lane detection +- Find solutions/implementations for the projection of LIDAR, Radar and image data +- Position validation necessary? 
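The classification net for the five traffic-light states listed above could be as small as the following PyTorch sketch. The architecture, the assumed 32x32 input crops, and all names are illustrative assumptions, not PAF21-1's actual net:

```python
import torch.nn as nn

class TrafficLightClassifier(nn.Module):
    """Tiny CNN for 5 classes: green, orange, red, off, backside."""

    def __init__(self, num_classes=5):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
        )
        self.head = nn.Sequential(
            nn.Flatten(),
            # Assumes 32x32 input crops, pooled twice down to 8x8.
            nn.Linear(32 * 8 * 8, num_classes),
        )

    def forward(self, x):
        return self.head(self.features(x))
```

Such a net would consume the traffic-light crops produced by the detection stage and feed its argmax state to the planning.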
diff --git a/doc/03_research/03_planning/00_paf22/03_Implementation.md b/doc/research/paf22/planning/Implementation.md
similarity index 65%
rename from doc/03_research/03_planning/00_paf22/03_Implementation.md
rename to doc/research/paf22/planning/Implementation.md
index 534f4142..7af7cd55 100644
--- a/doc/03_research/03_planning/00_paf22/03_Implementation.md
+++ b/doc/research/paf22/planning/Implementation.md
@@ -1,37 +1,22 @@
# Planning Implementation

-**Summary:**
-The document gives a first impression of how the planning could/should work
+**Summary:** The document gives a first impression of how the planning could/should work
and how the topics are handled

----
-
-## Authors
-
-Simon Erlbacher, Niklas Vogel
-
-## Date
-
-29.11.2022
-
----
-
-* [Planning Implementation](#planning-implementation)
-  * [Authors](#authors)
-  * [Date](#date)
-  * [Overview](#overview)
-  * [Preplanning](#preplanning)
-  * [Decision Making](#decision-making)
-  * [Local Path Planning](#local-path-planning)
-  * [Next steps](#next-steps)
-* [Sources](#sources)
-
-
----
+- [Planning Implementation](#planning-implementation)
+  - [Overview](#overview)
+  - [Preplanning](#preplanning)
+  - [Decision Making](#decision-making)
+  - [Local Path Planning](#local-path-planning)
+    - [Velocity profile](#velocity-profile)
+    - [Update path](#update-path)
+    - [Measure distance](#measure-distance)
+  - [Next steps](#next-steps)
+  - [Sources](#sources)

## Overview

-![Implementation](../../00_assets/Planning_Implementierung.png)
+![Implementation](../../assets/Planning_Implementierung.png)

[Link to original](https://miro.com/app/board/uXjVP_LIQpE=/?share_link_id=806357474480)

---

@@ -45,28 +30,28 @@ Either you use the given waypoints for start and goal values or alternatively th
The Output (Solution of the planning problem) will be a route defined by a sequence of lanelets and a sequence of points (~ 10cm apart).

Lanelet Model Example:

-![Lanelet Model Example](../../00_assets/Lanelets.png)
+![Lanelet Model Example](../../assets/Lanelets.png)

[(Source)](https://github.com/ll7/psaf2/tree/main/Planning/global_planner)

Input:

-* Map
-* Navigation Waypoints
-* (Odometry data (sensoring))
-* (GNUU data (sensoring))
+- Map
+- Navigation Waypoints
+- (Odometry data (sensoring))
+- (GNSS data (sensoring))

Output:

-* Route (Sequences of Lanelets and Points) (local path planning, decision making)
+- Route (Sequences of Lanelets and Points) (local path planning, decision making)

---

## Decision Making

-If an obstacle, which interferes with the own trajectory, is being recognized in the [perception](../02_perception),
+If an obstacle that interferes with our own trajectory is recognized by the [perception](../perception),
the decision making sends a message to the local path planning where the system then chooses another trajectory/lanelet.
With the Lanelets Model it is easier to give a prediction for other objects and the vehicle itself,
-by following the lane direction of an object. With the prediction, which is mainly based inside the [perception](../02_perception),
+by following the lane direction of an object. With the prediction, which is mainly based inside the [perception](../perception),
it's then possible to check whether or not other objects interfere with ourselves.
The decision making can be implemented with a state machine. Therefore there must be a state defined for every incoming perception/situation to ensure correct and safe behavior.
@@ -74,13 +59,13 @@ The system needs to make good predictions to avoid collisions. The Perception da

Input:

-* Lanelet data (preplanning, local path planning)
-* perception data (traffic lights situation, pedestrians,...)
+- Lanelet data (preplanning, local path planning)
+- perception data (traffic lights situation, pedestrians,...)

Output:

-* updated driving status (acting, local path planning)
-* Lanelet data (acting)
+- updated driving status (acting, local path planning)
+- Lanelet data (acting)

---

## Local Path Planning

@@ -92,15 +77,15 @@ Local Planner updates the current route, if Decision Making detects an obstacle.
The Local Path Planner receives the lanelets, points and the path to drive. The local planner creates a velocity profile on the calculated trajectory based on curvature, crossings and traffic lights.
-This will be calculated directly after the preplanning created a trajectory. The velocity value is published to the [acting side](../01_acting).
+This will be calculated directly after the preplanning has created a trajectory. The velocity value is published to the [acting side](../acting).

Input:

-* Trajectory points (preplanning)
+- Trajectory points (preplanning)

Output:

-* Max. Velocity (Acting)
+- Max. Velocity (Acting)

### Update path

@@ -111,39 +96,39 @@ It also tells the velocity profile to update because of the new trajectory.

Input:

-* lanelet modell (preplanning)
-* update command (decision making)
-* information about blocked lanelets (decision making, perception)
+- lanelet model (preplanning)
+- update command (decision making)
+- information about blocked lanelets (decision making, perception)

Output:

-* updated trajectory (acting, decision making)
-* update command (velocity profile)
+- updated trajectory (acting, decision making)
+- update command (velocity profile)

### Measure distance

-This module measures the distance to obstacles, especially cars, with the Lidar Sensor. The current distance value is published to the [acting side](../01_acting) for keeping a safe distance (Adaptive Cruise Control).
+This module measures the distance to obstacles, especially cars, with the Lidar Sensor. The current distance value is published to the [acting side](../acting) for keeping a safe distance (Adaptive Cruise Control).

Input:

-* Lidar Sensor data (perception, sensoring)
+- Lidar Sensor data (perception, sensoring)

Output:

-* distance value (acting)
+- distance value (acting)

---

## Next steps

-* Another Coordination with Perception to prevent overlaps with Map Manager, Map enrichment,
-* Implement Map Manager to convert data into a compatible type for route planning and to extract additional informations (Speed Limits, trafic signs, traffic lights)
-* Implement a commonroad route planner (old projects and Gitlab TUM)
-* Analyze Lanelet plan and be familiar with it (Which information can we additionally receive from the plan?)
-* Enrich Lanelet Modell/Map with additional Informations (additional/parallel Lanes, Speed Limits, trafic signs, traffic lights)
-* Choose the Decision Maker (Evaluate Markov Modell in combination with occupancy grid)
-* calculate and evaluate distances with given perceptions
-* Publish available and needed data (data available in this stage)
+- Another coordination with Perception to prevent overlaps with Map Manager, Map enrichment
+- Implement Map Manager to convert data into a compatible type for route planning and to extract additional information (speed limits, traffic signs, traffic lights)
+- Implement a commonroad route planner (old projects and Gitlab TUM)
+- Analyze Lanelet plan and be familiar with it (Which information can we additionally receive from the plan?)
+- Enrich Lanelet model/map with additional information (additional/parallel lanes, speed limits, traffic signs, traffic lights)
+- Choose the Decision Maker (Evaluate Markov model in combination with occupancy grid)
+- calculate and evaluate distances with given perceptions
+- Publish available and needed data (data available in this stage)

---

diff --git a/doc/03_research/03_planning/00_paf22/05_Navigation_Data.md b/doc/research/paf22/planning/Navigation_Data.md
similarity index 75%
rename from doc/03_research/03_planning/00_paf22/05_Navigation_Data.md
rename to doc/research/paf22/planning/Navigation_Data.md
index 2d9047ca..f16cc211 100644
--- a/doc/03_research/03_planning/00_paf22/05_Navigation_Data.md
+++ b/doc/research/paf22/planning/Navigation_Data.md
@@ -2,26 +2,11 @@

**Summary:** This page gives an overview and summary of how navigation data can be received, how it is structured and a visualisation of where the route instructions are placed on the ego vehicle route.

----
-
-## Author
-
-Niklas Vogel
-
-## Date
-
-14.12.2022
-
----
-
-* [Navigation Data Research](#navigation-data-research)
-  * [Author](#author)
-  * [Date](#date)
-  * [How to receive navigation data](#how-to-receive-navigation-data)
-  * [Structure of navigation data](#structure-of-navigation-data)
-  * [Visualisation of received navigation data](#visualisation-of-received-navigation-data)
-* [Sources](#sources)
-
+- [Navigation Data Research](#navigation-data-research)
+  - [How to receive navigation data](#how-to-receive-navigation-data)
+  - [Structure of navigation data](#structure-of-navigation-data)
+  - [Visualisation of received navigation data](#visualisation-of-received-navigation-data)
+  - [Sources](#sources)

## How to receive navigation data

@@ -58,15 +43,15 @@ Therefore, the Map is published as topic ``/carla/hero/OpenDrive`` in [OpenDRIVE
The route is published in the following topics:

-* ``/carla/hero/global_plan`` ([carla_msgs/CarlaRoute](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaRoute.msg))
-* ``/carla/hero/global_plan_gnss`` ([carla_msgs/CarlaGnnsRoute](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaGnssRoute.msg))
+- ``/carla/hero/global_plan`` ([carla_msgs/CarlaRoute](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaRoute.msg))
+- ``/carla/hero/global_plan_gnss`` ([carla_msgs/CarlaGnssRoute](https://github.com/carla-simulator/ros-carla-msgs/blob/leaderboard-2.0/msg/CarlaGnssRoute.msg))

## Structure of navigation data

Routes consist of tuples of a position and a high-level route instruction command which should be taken at that point.
Positions are either given as GPS coordinates or as world coordinates:

-* GPS coordinates:
+- GPS coordinates:

  ```yaml
  [({'z': 0.0, 'lat': 48.99822669411668, 'lon': 8.002271601998707}, RoadOption.LEFT),
   ({'z': 0.0, 'lat': 48.99822679980298, 'lon': 8.002735250105061}, RoadOption.STRAIGHT)]
  ```

-* World coordinates:
+- World coordinates:

  ```yaml
  [({'x': 153.7, 'y': 15.6, 'z': 0.0}, RoadOption.LEFT),
   ({'x': 180.7, 'y': 45.1, 'z': 1.2}, RoadOption.STRAIGHT)]
  ```

-* High-level route instruction commands (road options):
+- High-level route instruction commands (road options):

-  * RoadOption.**CHANGELANELEFT**: Move one lane to the left.
-  * RoadOption.**CHANGELANERIGHT**: Move one lane to the right.
-  * RoadOption.**LANEFOLLOW**: Continue in the current lane.
-  * RoadOption.**LEFT**: Turn left at the intersection.
-  * RoadOption.**RIGHT**: Turn right at the intersection.
-  * RoadOption.**STRAIGHT**: Keep straight at the intersection.
+  - RoadOption.**CHANGELANELEFT**: Move one lane to the left.
+  - RoadOption.**CHANGELANERIGHT**: Move one lane to the right.
+  - RoadOption.**LANEFOLLOW**: Continue in the current lane.
+  - RoadOption.**LEFT**: Turn left at the intersection.
+  - RoadOption.**RIGHT**: Turn right at the intersection.
+  - RoadOption.**STRAIGHT**: Keep straight at the intersection.

**Important:** Distance between route points can be up to hundreds of meters.

@@ -103,7 +88,7 @@
WIP notes from team-internal meeting:

-* leaderboard evaluation visualisiert die route und scenarien evtl schon... evtl wert genauer zu betrachten
+- the leaderboard evaluation possibly already visualizes the route and the scenarios... possibly worth a closer look

### Sources

diff --git a/doc/03_research/03_planning/00_paf22/07_OpenDrive.md b/doc/research/paf22/planning/OpenDrive.md
similarity index 54%
rename from doc/03_research/03_planning/00_paf22/07_OpenDrive.md
rename to doc/research/paf22/planning/OpenDrive.md
index 7c5c46fc..84895cd3 100644
--- a/doc/03_research/03_planning/00_paf22/07_OpenDrive.md
+++ b/doc/research/paf22/planning/OpenDrive.md
@@ -2,35 +2,19 @@

**Summary:** Evaluate the reading of the OpenDrive map in other projects and outline recommended further steps.
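Reading such an OpenDrive map directly could start out as small as the sketch below, using only the standard library. The `road` element with its `id`, `name` and `junction` attributes follows the OpenDRIVE schema; everything else here is an illustrative assumption:

```python
import xml.etree.ElementTree as ET

def list_roads(xodr_text):
    """Yield (id, name, junction) for every road in an OpenDrive string.

    A junction value of "-1" means the road is not part of a junction;
    `xodr_text` could, for example, come from the CarlaWorldInfo message.
    """
    root = ET.fromstring(xodr_text)
    for road in root.findall("road"):
        yield road.get("id"), road.get("name"), road.get("junction")

# Usage sketch:
# for road_id, name, junction in list_roads(opendrive_string):
#     print(road_id, name, junction)
```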
----
-
-## Authors
-
-Simon Erlbacher
-
-### Date
-
-10.01.2023
-
----
-
-
-* [OpenDrive Format](#opendrive-format)
-  * [Authors](#authors)
-  * [Date](#date)
-  * [General](#general)
-  * [Different Projects](#different-projects)
-    * [PSAF1](#psaf1)
-    * [PSAF2](#psaf2)
-    * [paf21-2](#paf21-2)
-    * [paf21-1](#paf21-1)
-    * [Result](#result)
-  * [More information about OpenDrive](#more-information-about-opendrive)
-  * [Start of the implementation](#start-of-the-implementation)
-  * [Implementation details](#implementation-details)
-  * [Follow-up Issues](#follow-up-issues)
-  * [Sources](#sources)
-
+- [OpenDrive Format](#opendrive-format)
+  - [General](#general)
+  - [Different Projects](#different-projects)
+    - [PSAF1](#psaf1)
+    - [PSAF2](#psaf2)
+    - [paf21-2](#paf21-2)
+    - [paf21-1](#paf21-1)
+    - [Result](#result)
+  - [More information about OpenDrive](#more-information-about-opendrive)
+  - [Start of the implementation](#start-of-the-implementation)
+  - [Implementation details](#implementation-details)
+  - [Follow-up Issues](#follow-up-issues)
+  - [Sources](#sources)

## General

@@ -45,33 +29,33 @@ It is examined how the OpenDrive file is converted and read in other groups and

### PSAF1

-* Subscribed the OpenDrive information from the Carla Simulator
-* Used the Commonroad Route Planner from TUM (in the project they used the now deprecated verison)
-* This Route Planner converts the xdor file from the CarlaWorldInfo message automatically
-* As a result they used a Lanelet model, which they enriched with additional information about
+- Subscribed the OpenDrive information from the Carla Simulator
+- Used the Commonroad Route Planner from TUM (in the project they used the now deprecated version)
+- This Route Planner converts the xodr file from the CarlaWorldInfo message automatically
+- As a result they used a Lanelet model, which they enriched with additional information about
traffic lights and traffic signs
-* This additional information comes from the Carla Simulator API
+- This additional information comes from the Carla Simulator API

Result: We can't use this information from [psaf1](https://github.com/ll7/psaf1/tree/master/psaf_ros/psaf_global_planner), because it is not allowed to use privileged information from the Carla Simulator

### PSAF2

-* Same approach as described in PSAF1 above
-* Same problem in [psaf2](https://github.com/ll7/psaf2/tree/main/Planning/global_planner) with this approach as
+- Same approach as described in PSAF1 above
+- Same problem in [psaf2](https://github.com/ll7/psaf2/tree/main/Planning/global_planner) with this approach as
mentioned in PSAF1

### paf21-2

-* Same approach as described in PSAF1 above
-* Same problem in [paf21-2](https://github.com/ll7/paf21-2#global-planner) with this approach as mentioned in PSAF1
+- Same approach as described in PSAF1 above
+- Same problem in [paf21-2](https://github.com/ll7/paf21-2#global-planner) with this approach as mentioned in PSAF1

### paf21-1

-* Worked directly with the OpenDrive format
-* There is a lot of information available
-* They extracted some information from the xdor file to plan their trajectory
-* They don't recommend to use this approach, because a lot of "black magic" is happening in their code
+- Worked directly with the OpenDrive format
+- There is a lot of information available
+- They extracted some information from the xodr file to plan their trajectory
+- They don't recommend using this approach, because a lot of "black magic" is happening in their code

Result: The only possible way to get all the
road information without using the Carla Simulator API
@@ -83,19 +67,19 @@ during the planning process. It would be better to convert and analyse the xodr

## More information about OpenDrive

-* We can read the xdor file with the [ElementTree XML API](https://docs.python.org/3/library/xml.etree.elementtree.html)
-* We can refactor the scripts from paf21-1 but as they described, it is a lot of code and hard to get a good
+- We can read the xodr file with the [ElementTree XML API](https://docs.python.org/3/library/xml.etree.elementtree.html)
+- We can refactor the scripts from paf21-1, but as they described, it is a lot of code and hard to get a good
overview of it
-* Also we have a different scenario, because we do not need to read the whole xdor file in the beginning. We need
+- We also have a different scenario, because we do not need to read the whole xodr file in the beginning. We need
to search for the relevant area
-* The OpenDrive format contains a lot of information to extract
-  * Every road section has a unique id
-  * Road has a predecessor and a successor with its specific type (road, junction,...)
-  * Information about signals and their position
-  * Information about the reference lines (line which seperates lanes) and their layout (linear, arc, cubic curves)
-  * Information about the maximum speed
+- The OpenDrive format contains a lot of information to extract
+  - Every road section has a unique id
+  - Every road has a predecessor and a successor with its specific type (road, junction, ...)
+  - Information about signals and their position
+  - Information about the reference lines (the line which separates lanes) and their layout (linear, arc, cubic curves)
+  - Information about the maximum speed

-![OpenDrive stop sign](../../00_assets/Stop_sign_OpenDrive.png)
+![OpenDrive stop sign](../../assets/Stop_sign_OpenDrive.png)
Impression of the format

There is a lot of information in the file, and also a lot that is not relevant for our project.
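
Since reading the file with ElementTree is suggested above, a minimal sketch of pulling out exactly the listed pieces (road id, junction membership, predecessor/successor, max speed) could look like the following; the file name is made up, and the element and attribute names follow the excerpts shown below:

```python
import xml.etree.ElementTree as ET

# Hypothetical file name; in the simulator the map arrives as an XML string,
# so ET.fromstring(...) would be used instead of parsing a file.
root = ET.parse("town.xodr").getroot()

for road in root.findall("road"):
    road_id = road.get("id")
    junction = road.get("junction")  # "-1" if the road is not part of a junction
    link = road.find("link")
    predecessor = link.find("predecessor") if link is not None else None
    successor = link.find("successor") if link is not None else None
    speed = road.find("type/speed")  # the max speed, if present, sits in <type>
    max_speed = speed.get("max") if speed is not None else None
    print(road_id, junction, max_speed,
          None if predecessor is None else predecessor.get("elementId"),
          None if successor is None else successor.get("elementId"))
```
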
@@ -108,22 +92,22 @@ After that, we can add some more information about the signals to our trajectory

structure of the xodr files from the Simulator:

-* header
-* road (attributes: junction id (-1 if no junction), length, road id, Road name)
-  * lanes
-  * link (predecessor and successor with id)
-  * signals
-  * type (contains max speed)
-  * planView (contains information about the geometry and the line type (= reference line))
-* controller (information about the controlled signals)
-* junction (crossing lane sections)
+- header
+- road (attributes: junction id (-1 if no junction), length, road id, road name)
+  - lanes
+  - link (predecessor and successor with id)
+  - signals
+  - type (contains max speed)
+  - planView (contains information about the geometry and the line type (= reference line))
+- controller (information about the controlled signals)
+- junction (crossing lane sections)

link:

-* every road has a successor and a predecessor road (sometimes only one of them)
-* the road can have the type "road" or "junction"
-* we can access the relevant sections with an id value
-* Example:
+- every road has a successor and a predecessor road (sometimes only one of them)
+- the road can have the type "road" or "junction"
+- we can access the relevant sections with an id value
+- Example:

@@ -132,10 +116,10 @@ link:

planView:

-* x and y world coordinates (startposition of the reference line)
-* hdg value for the orientation
-* length value for the length of this road section (reference line)
-* reference line type: line, curvature (more possible in Asam OpenDrive)
+- x and y world coordinates (start position of the reference line)
+- hdg value for the orientation
+- length value for the length of this road section (reference line)
+- reference line type: line, curvature (more possible in Asam OpenDrive)

@@ -151,18 +135,18 @@ planView:

lane:

-* a lane is part of a road
-* road can consists of different lanes
-* the lane next to the reference line has the value 1
-* the lanes next to that lane have increasing numbers
-* lanes on the left and on the right side of the reference line have different signs
+- a lane is part of a road
+- a road can consist of different lanes
+- the lane next to the reference line has the value 1
+- the lanes next to that lane have increasing numbers
+- lanes on the left and on the right side of the reference line have different signs

junction:

-* a road section with crossing lanes
-* a junction has one id
-* every segment in the junction connects different lanes
-* every connection has its own id
+- a road section with crossing lanes
+- a junction has one id
+- every segment in the junction connects different lanes
+- every connection has its own id

@@ -176,16 +160,16 @@ junction:

Relevant coordinate system:

-* inertial coordinate system
-  * x -> right (roll)
-  * y -> up (pitch)
-  * z -> coming out of the drawig plane (yaw)
+- inertial coordinate system
+  - x -> right (roll)
+  - y -> up (pitch)
+  - z -> coming out of the drawing plane (yaw)

Driving direction:

-* calculate on which road the agent drives
-* that has an impact on the way we have to calculate the end points
-* A road is decribed through the reference line. Every road segment has a
+- calculate on which road the agent drives
+- that has an impact on the way we have to calculate the end points
+- A road is described through the reference line. Every road segment has a
starting point and a length value, which gives the distance to the following road segment.
The calculation of the trajectory uses the start point of the next road segment
to navigate along the street. If the agent drives on the other side of the street,
@@ -198,105 +182,105 @@ the start points of the reference line

There are two methods to calculate the trajectory. The first method is only needed once at the beginning, when
the ego-vehicle stays at its start position.

-* First we need to find the current road, where the agent is located
-* Take all road start points and calculate the nearest startpoint to the vehicle position
-* Calculate Endpoint for each connecting road and check if the vehicle lays in the interval -> road id
-  * use the predecessor and the successor points to get the correct road
-  * also check if the predecessor or successor is a junction. If do not have a command from the leaderboard we pass
+- First we need to find the current road, where the agent is located
+- Take all road start points and calculate the nearest start point to the vehicle position
+- Calculate the endpoint for each connecting road and check if the vehicle lies in the interval -> road id
+  - use the predecessor and the successor points to get the correct road
+  - also check if the predecessor or successor is a junction. If we do not have a command from the leaderboard, we pass
the junction straight. For this scenario we first have to filter the correct road id out of the junction to get
the start and endpoint
-  * check if the ego vehicle lays in the interval -> if yes change the road id (else we chose the correct one)
-* Check the driving direction (following road id)
-  * calculate the distances from one predecessor point and one successor point to the target point
-  * the road with the smaller distance is the next following road
-* Interpolate the current road from start to end (arc and line)
-  * check the point ordering -> possible that we have to reverse them
-  * at the beginning we can be located in the middle of a street
-  * we need to delete the points from the interpolation laying before our ego vehicle position
-* Weakness
-  * The Calculation of the driving direction is based on the distance to the target location
-  * If the course of the road is difficult, this approach could fail
-  * As you can see in the top right corner of the picture. the distance from the lower blue line
+  - check if the ego vehicle lies in the interval -> if yes, change the road id (else we chose the correct one)
+- Check the driving direction (following road id)
+  - calculate the distances from one predecessor point and one successor point to the target point
+  - the road with the smaller distance is the next following road
+- Interpolate the current road from start to end (arc and line)
+  - check the point ordering -> possible that we have to reverse them
+  - at the beginning we can be located in the middle of a street
+  - we need to delete the points from the interpolation lying before our ego vehicle position
+- Weakness
+  - The calculation of the driving direction is based on the distance to the target location
+  - If the course of the road is difficult, this approach could fail
+  - As you can see in the top right corner of the picture, the distance from the lower blue line
is shorter to the target than the upper blue line.
The method would choose the lower line because of the smaller distance

-![preplanning_start](../../00_assets/preplanning_start.png)
+![preplanning_start](../../assets/preplanning_start.png)
Road Concepts

Further Calculation of the trajectory

-* after each interpolation we calculate the midpoint of a lane. Otherwise we would drive on
+- after each interpolation we calculate the midpoint of a lane. Otherwise we would drive on
the reference line. That is why we have to filter the width information for our lanes.
-  * there can be more than one driving lane on one side of the reference line
-  * filter all width values and decide on which side of the reference line the vehicle drives
-  * after this we have the information which of the two perpendicular vectors we need to compute
+  - there can be more than one driving lane on one side of the reference line
+  - filter all width values and decide on which side of the reference line the vehicle drives
+  - after this we have the information which of the two perpendicular vectors we need to compute
the points on the correct side of the reference line
-  * we always choose the biggest width value, to take the rightmost lane
-![lane_midpoint](../../00_assets/lane_midpoint.png)
+  - we always choose the biggest width value, to take the rightmost lane
+![lane_midpoint](../../assets/lane_midpoint.png)
Scenario and concept to compute the midpoint of a lane

-* the second method takes the target position and the next command from the leaderboard
-* we always calculate the follow road based on the distance to the target and then
+- the second method takes the target position and the next command from the leaderboard
+- we always calculate the following road based on the distance to the target and then
interpolate the current road
-  * here we can also change this approach if there is the same weakness as mentioned before
-  * we can calculate the next road based on the distance to the last trajectory point
-* we have to keep in mind the same aspects as in the starting case
-* after each interpolation of a road we check the distance from the new trajectory points to
+  - here we can also change this approach if there is the same weakness as mentioned before
+  - we can calculate the next road based on the distance to the last trajectory point
+- we have to keep in mind the same aspects as in the starting case
+- after each interpolation of a road we check the distance from the new trajectory points to
the target position
-  * if the distance is smaller than a set threshold, we reached the target
-  * in this case we may need to calculate this last road again because based on the command
+  - if the distance is smaller than a set threshold, we reached the target
+  - in this case we may need to calculate this last road again because, based on the command
from the leaderboard, we have to turn to the left side or the right side. We need to change
the lane before we reach the start point of a junction
-  * we calculate the next road to take, based on the heading value of the endpoint of this
+  - we calculate the next road to take, based on the heading value of the endpoint of this
following road. We compare this value to the yaw value from the leaderboard. The heading
value with the smallest distance indicates the correct following road id.
-  * when we know the end point of the following road, we can recompute the last trajectory point
+  - when we know the end point of the following road, we can recompute the last trajectory point
with all possible width values for this road.
calculate the distance to the following endpoint and choose the width value with the smallest distance.
-  * Now we can interpolate our last road with the new width value (if the width value was updated)
-  * Also we can smooth our first trajectory points with smaller width values, to change the lane smooth
+  - Now we can interpolate our last road with the new width value (if the width value was updated)
+  - Also we can smooth our first trajectory points with smaller width values, to change the lane smoothly

For the next target point and command we need to call this method again (not the starting method) and calculate the trajectory.

Weakness

-* Offset for restricted areas is not yet calculated (see the picture above)
-* no max speed value for junctions -> default value
-* Check where the target points are located. In the middle of a junction or before?
+- Offset for restricted areas is not yet calculated (see the picture above)
+- no max speed value for junctions -> default value
+- Check where the target points are located. In the middle of a junction or before?
At the moment we assume they are before a junction.

In the following test scenario we added a manual start point on road 8. The following target points and commands for the next action have also been added manually.

-![roads_vis](../../00_assets/roads_vis.png)
+![roads_vis](../../assets/roads_vis.png)
roads to interpolate

-![trajectory_roads](../../00_assets/trajectory_roads.png)
+![trajectory_roads](../../assets/trajectory_roads.png)
roads chosen by the methods

-![global_trajectory](../../00_assets/global_trajectory.png)
+![global_trajectory](../../assets/global_trajectory.png)
Global trajectory visualised

-![local_trajectory](../../00_assets/local_trajectory.png)
+![local_trajectory](../../assets/local_trajectory.png)
One cutout of the trajectory

## Follow-up Issues

-* Check out positioning
-  * Compare positioning of signs in Carla and in the OpenDrive Map
-  * Compare positioning of traffic lights in Carla and in the OpenDrive Map
-* Visualize Trajectory in Carla
-* Implement velocity profile
-* Check if waypoints fit with Simulator
-* Keep the lane limitation -> testing
-* Extract signals information for the state machine
-* Implement local path planner for alternative routes and collision prediction
+- Check out positioning
+  - Compare positioning of signs in Carla and in the OpenDrive Map
+  - Compare positioning of traffic lights in Carla and in the OpenDrive Map
+- Visualize Trajectory in Carla
+- Implement velocity profile
+- Check if waypoints fit with Simulator
+- Keep the lane limitation -> testing
+- Extract signals information for the state machine
+- Implement local path planner for alternative routes and collision prediction

## Sources

diff --git a/doc/03_research/03_planning/00_paf22/02_basics.md b/doc/research/paf22/planning/basics.md
similarity index 64%
rename from doc/03_research/03_planning/00_paf22/02_basics.md
rename to doc/research/paf22/planning/basics.md
index b8b75532..093dcb23 100644
--- a/doc/03_research/03_planning/00_paf22/02_basics.md
+++ b/doc/research/paf22/planning/basics.md
@@ -1,42 +1,31 @@
# Grundrecherche im Planing

-## Authors
-
-Simon Erlbacher, Niklas Vogel
-
-## Datum
-
-15.11.2022
-
----
-
-* [Grundrecherche im Planing](#grundrecherche-im-planing)
-  * [Authors](#authors)
-  * [Datum](#datum)
-  * [PAF 2021-1](#paf-2021-1)
-    * [Vehicle Controller](#vehicle-controller)
-    * [Decision-Making-Component](#decision-making-component)
-  * [PAF 2021-2](#paf-2021-2)
-  * [PAF 2020 (1 & 2)](#paf-2020-1--2)
-  
* [Informationen aus alten Projekten](#informationen-aus-alten-projekten) - * [Planning Unterteilung](#planning-unterteilung) - * [Probleme](#probleme) - * [Lokalisierung](#lokalisierung) - * [Hindernisse erkennen](#hindernisse-erkennen) - * [Sicherheitseigenschaften](#sicherheitseigenschaften) - * [Decision Making (Behaviour Planner)](#decision-making-behaviour-planner) - * [Trajektorie](#trajektorie) - * [Trajektorie Tracking](#trajektorie-tracking) - * [Offene Fragen aus dem Issue](#offene-fragen-aus-dem-issue) - * [Was ist zu tun?](#was-ist-zu-tun) - * [Eingang](#eingang) - * [Ausgang](#ausgang) - * [Wie sehen die Daten vom Leaderboard für das Global Planning aus](#wie-sehen-die-daten-vom-leaderboard-für-das-global-planning-aus) - * [Daten aus dem LB und Global planning, wie kann daraus eine Trajektorie generiert werden](#daten-aus-dem-lb-und-global-planning-wie-kann-daraus-eine-trajektorie-generiert-werden) - * [Wie sieht die Grenze zwischen global und local plan aus?](#wie-sieht-die-grenze-zwischen-global-und-local-plan-aus) - * [Müssen Staus umfahren werden?](#müssen-staus-umfahren-werden) - * [Sollgeschwindigkeitsplanung](#sollgeschwindigkeitsplanung) - +**Summary:** This page contains the resarch of planning components of previous years. + +- [Grundrecherche im Planing](#grundrecherche-im-planing) + - [PAF 2021-1](#paf-2021-1) + - [Vehicle Controller](#vehicle-controller) + - [Decision-Making-Component](#decision-making-component) + - [PAF 2021-2](#paf-2021-2) + - [PAF 2020 (1 \& 2)](#paf-2020-1--2) + - [Informationen aus alten Projekten](#informationen-aus-alten-projekten) + - [Planning Unterteilung](#planning-unterteilung) + - [Probleme](#probleme) + - [Lokalisierung](#lokalisierung) + - [Hindernisse erkennen](#hindernisse-erkennen) + - [Sicherheitseigenschaften](#sicherheitseigenschaften) + - [Decision Making (Behaviour Planner)](#decision-making-behaviour-planner) + - [Trajektorie](#trajektorie) + - [Trajektorie Tracking](#trajektorie-tracking) + - [Offene Fragen aus dem Issue](#offene-fragen-aus-dem-issue) + - [Was ist zu tun?](#was-ist-zu-tun) + - [Eingang](#eingang) + - [Ausgang](#ausgang) + - [Wie sehen die Daten vom Leaderboard für das Global Planning aus](#wie-sehen-die-daten-vom-leaderboard-für-das-global-planning-aus) + - [Daten aus dem LB und Global planning, wie kann daraus eine Trajektorie generiert werden](#daten-aus-dem-lb-und-global-planning-wie-kann-daraus-eine-trajektorie-generiert-werden) + - [Wie sieht die Grenze zwischen global und local plan aus?](#wie-sieht-die-grenze-zwischen-global-und-local-plan-aus) + - [Müssen Staus umfahren werden?](#müssen-staus-umfahren-werden) + - [Sollgeschwindigkeitsplanung](#sollgeschwindigkeitsplanung) ## [PAF 2021-1](https://github.com/ll7/paf21-1) @@ -57,15 +46,15 @@ Die Kurvendetektion berechnet die maximale Kurvengeschwindigkeit durch Ermittlun Inputs: -* Fahrzeugposition -* Fahrzeugorientierung -* Fahrzeuggeschwindigkeit -* Fahrtrajektorie +- Fahrzeugposition +- Fahrzeugorientierung +- Fahrzeuggeschwindigkeit +- Fahrtrajektorie Outputs: -* Sollgeschwindigkeit -* Lenkwinkel +- Sollgeschwindigkeit +- Lenkwinkel ### Decision-Making-Component @@ -82,16 +71,16 @@ Finite-state machine für Manöver: Inputs: -* Geschwindigkeit -* Objekt auf Trajektorie -* Ampelsignale -* Geschwindigkeitsbegrenzung -* Geschwindigkeit und Position anderer Verkehrsteilnehmer -* Target Lane +- Geschwindigkeit +- Objekt auf Trajektorie +- Ampelsignale +- Geschwindigkeitsbegrenzung +- Geschwindigkeit und Position anderer Verkehrsteilnehmer +- Target 
Lane Outputs: -* "Actions" (Bremsen, Beschleunigen, Halten, Spurwechsel...) +- "Actions" (Bremsen, Beschleunigen, Halten, Spurwechsel...) Globaler Planer Überblick: ![Alt text](https://github.com/ll7/paf21-1/raw/master/imgs/Global%20Planer.png) @@ -100,21 +89,21 @@ Globaler Planer Überblick: verantwortlich für die Routenplanung und Pfadplanung für das Ego-Vehicle sowie die erkannten Verkehrsteilnehmer. -* global_planner - * Planung einer Route von einem Startpunkt zu einem oder einer Liste an Zielpunkten - * Commonroad Route Planner (TUM) -> Liste an Routen-Lanelets sowie eine Liste an Punkten mit Abstand etwa 10cm - * Anreicherung mit parallelen Spuren -* local_planner - * Lokale Pfadplanung inklusive Spurwahl, Ampelmanagement und Spurwechsel - * erlaubte Geschwindigkeit, sowie die bevorzugte Spur basierend auf der Hinderniserkennung (obstacle planner) wird ergänzt - * "beste"/schnellste Möglichkeit wird errechnet und weiter an acting geschickt -* obstacle_planner - * Verwaltung von dynamischen hindernissen - * Vorhersage von Pfaden anderer Fahrzeuge und generieren von Folgefahrzeug-Informationen - * Verwerfen von "irrelevanten" Fahrezeugen - -* Geschwindigkeitsplanung/Kontrolle wie 2021-1 + Bremswegplanung [Details](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_planning#bremsweg) -* Map Manager für die Verwaltung aller statischen Kartendaten +- global_planner + - Planung einer Route von einem Startpunkt zu einem oder einer Liste an Zielpunkten + - Commonroad Route Planner (TUM) -> Liste an Routen-Lanelets sowie eine Liste an Punkten mit Abstand etwa 10cm + - Anreicherung mit parallelen Spuren +- local_planner + - Lokale Pfadplanung inklusive Spurwahl, Ampelmanagement und Spurwechsel + - erlaubte Geschwindigkeit, sowie die bevorzugte Spur basierend auf der Hinderniserkennung (obstacle planner) wird ergänzt + - "beste"/schnellste Möglichkeit wird errechnet und weiter an acting geschickt +- obstacle_planner + - Verwaltung von dynamischen hindernissen + - Vorhersage von Pfaden anderer Fahrzeuge und generieren von Folgefahrzeug-Informationen + - Verwerfen von "irrelevanten" Fahrezeugen + +- Geschwindigkeitsplanung/Kontrolle wie 2021-1 + Bremswegplanung [Details](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_planning#bremsweg) +- Map Manager für die Verwaltung aller statischen Kartendaten ## PAF 2020 ([1](https://github.com/ll7/psaf1) & [2](https://github.com/ll7/psaf2)) @@ -129,14 +118,14 @@ Teilbaum "Intersection" als Beispiel: "If there is a Intersection coming up the agent executes the following sequence of behaviours: -* Approach Intersection - * Slows down, gets into the right lane for turning and stops at line -* Wait at Intersection - * Waits for traffic lights or higher priority traffic -* Enter Intersection - * Enters the intersection and stops again, if there is higher priority oncoming traffic -* Leave Intersection - * Leaves the intersection in the right direction" +- Approach Intersection + - Slows down, gets into the right lane for turning and stops at line +- Wait at Intersection + - Waits for traffic lights or higher priority traffic +- Enter Intersection + - Enters the intersection and stops again, if there is higher priority oncoming traffic +- Leave Intersection + - Leaves the intersection in the right direction" [Kompletter Entscheidungsbaum](https://github.com/ll7/psaf2/tree/main/Planning/behavior_agent) @@ -144,27 +133,27 @@ Teilbaum "Intersection" als Beispiel: Quellen: -* -* +- +- -![architektur 
gewinnterteam19](../../00_assets/gewinnerteam19-architektur.png) +![architektur gewinnterteam19](../../assets/gewinnerteam19-architektur.png) Übersicht zu einer möglichen Architektur (Gewinnerteam des ersten Wettbewerbes) -![sensoranordnung](../../00_assets/sensoranordnung.png) +![sensoranordnung](../../assets/sensoranordnung.png) Mögliche Anordnung und Anzahl von Sensoren. (6 Kameras, 1 LIDAR, 2 GPS) ## Planning Unterteilung -![planning uebersicht](../../00_assets/planning%20%C3%BCbersicht.png) +![planning uebersicht](../../assets/planning%20%C3%BCbersicht.png) Planning Übersicht ## Probleme -* Kollision mit statischen Objekten (Gehsteig) -* Kollision mit Fußgängern die unerwartetes Verhalten zeigen +- Kollision mit statischen Objekten (Gehsteig) +- Kollision mit Fußgängern die unerwartetes Verhalten zeigen Es wird vorgeschlagen ein festes Notfallmanöver für das Fahrzeug zu erstellen, welches mit einer schnelleren Reaktionszeit greift, um unerwartete Kollisionen zu verhindern. @@ -174,7 +163,7 @@ Die Position des Fahrzeuges kann durch die zwei GPS Tracker bestimmt werden und Alt steht hierbei für altitude und beschreibt die gemessene Höhe durch die GPS Tracker. Der Winkel gibt hierbei die Orientierung des Fahrzeuges an. Der x und y Wert beinhaltet die Koordinaten des hinteren GPS Trackers. -![positionsvektor](../../00_assets/positionsvektor.png) ![fahrzeugwinkelberechnung](../../00_assets/fahrzeugwinkelberechnung.png) +![positionsvektor](../../assets/positionsvektor.png) ![fahrzeugwinkelberechnung](../../assets/fahrzeugwinkelberechnung.png) Positionsvektor und Berechnung des Fahrzeugwinkels zur Zielposition @@ -183,7 +172,7 @@ Wenn das GPS Signal allerdings fehlerhaft ist bzw. Störungen ausgesetzt ist, gi In diesem Fall wird ein Kalman Filter impolementiert. Er kommt mit Störungen zurecht und gibt auf Basis der aktuellen Position eine gute Vorhersage für zukünftige Zustände des Fahrzeuges. -![fahrzeugpositionsberechnung](../../00_assets/fahrzeugpositionsberechnung.png) +![fahrzeugpositionsberechnung](../../assets/fahrzeugpositionsberechnung.png) Berechnung der aktuellen und zukünftigen Fahrzeugposition @@ -193,7 +182,7 @@ Mit dem LIDAR Sensor werden Punktewolken in der Umgebung erzeugt. Diese werden mit dem DBSCAN Algorithmus geclustert. Er kommt gut mit outlinern klar und kann diese entsprechend ignorieren. Mit Calipers Algorithmus aus der OpenCV Bibliothek wird für jedes Cluster das kleinst mögliche Rechteck, welches das Cluster fitted, erzeugt. -![lidarhinderniserkennung](../../00_assets/lidarhinderniserkennung.png) +![lidarhinderniserkennung](../../assets/lidarhinderniserkennung.png) Erkennen von Hindernissen mit dem LIDAR Sensor @@ -205,15 +194,15 @@ Das Skalarprodukt ist hiermit nahe 0. Es wurde also ein Hinderniss erkannt. Der Dies soll einen Outliner darstellen. Druch das Einführen eines Thresholds können diese Detektionen ausgeschlossen werden. Hindernisse mit dem Occupacy Grid erkennen. Somit einfach Abstand der Punkte in einer Gridzelle mit dem Mittelpunkt eines Kreises berechnen und prüfen ob die Distanz kleiner als der Radius ist. 
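
A minimal sketch of the circle test described above (all names and values are illustrative, not taken from a previous project): the vehicle is approximated by a few circles along its heading, and a grid point counts as a potential collision when its distance to a circle centre is below the radius.

```python
import math


def vehicle_circles(x, y, yaw, length=4.5, radius=1.2):
    """Approximate a vehicle by three circles along its heading (values assumed)."""
    offsets = (-length / 3.0, 0.0, length / 3.0)
    return [(x + d * math.cos(yaw), y + d * math.sin(yaw)) for d in offsets]


def collides(grid_points, center_x, center_y, radius=1.2):
    """A grid point is a potential collision if it lies inside the circle."""
    return any(math.hypot(px - center_x, py - center_y) < radius
               for px, py in grid_points)
```
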
-![occupancygrid](../../00_assets/occupancygrid.png) +![occupancygrid](../../assets/occupancygrid.png) 360 Grad Occupacy Grid -![fahrzeugapproximation](../../00_assets/fahrzeugapproximation.png) +![fahrzeugapproximation](../../assets/fahrzeugapproximation.png) Approximation eines Fahrzeuges mit drei Kreisen -![kollisionsberechnung](../../00_assets/kollisionsberechnung.png) +![kollisionsberechnung](../../assets/kollisionsberechnung.png) Einfache Berechnung einer Kollision @@ -221,16 +210,16 @@ Einfache Berechnung einer Kollision Wichtig ist die Sicherheitseigenschaft von Autonomen Fahrzeugen. Risiken können in drei KLassen unterteilt werden: -* Kollision mit statischen Objekten -* Kollision mit dynamischen Objekten -* Kollision mit unerwarteten Objekten +- Kollision mit statischen Objekten +- Kollision mit dynamischen Objekten +- Kollision mit unerwarteten Objekten In dem Beispielprojekt wurde eine Bewertung der Überlappung von Trajekotrien verschiedener Objekte zur HAnd genommen. Es wird eine mögliche Kollisionszone bestimmt. Das Fahrzeug hat hierbei drei Zonen auf seiner Trajektorie. -* Danger Zone: Hier muss sofort gestoppt werden wenn ein Trajektorien Konflikt detektiert wird -* Warning Zone: Hier entsprechend die Geschwindigkeit anpassen im Verhältnis zu der DTC (distance to collision) -* Safe Zone +- Danger Zone: Hier muss sofort gestoppt werden wenn ein Trajektorien Konflikt detektiert wird +- Warning Zone: Hier entsprechend die Geschwindigkeit anpassen im Verhältnis zu der DTC (distance to collision) +- Safe Zone Die Kollision benötigt die Position eines möglichen Kollisionsgegenstandes und seine Form. Wenn die Orientierung und die Geschwindigkeit verfügbar sind, kann eine Vorhersage zu der zukünftigen Position getroffen werden, um Konflikte zu vermeiden. @@ -239,27 +228,27 @@ Annahme: Alle Verkehrsteilnehmer haben konstante Geschwindigkeit (sonst Berechnu ## Decision Making (Behaviour Planner) -![kreuzungszonen](../../00_assets/kreuzungszonen.png) +![kreuzungszonen](../../assets/kreuzungszonen.png) Verkehrsszenario einer Kreuzung mit verschiedenen Zonen. -* Roter Bereich: Fahrzeug verlangsamt seine Geschwindigkeit -* Grüner Bereich: Fahrzeug kommt zum stehen -* Oranger Bereich (Intersection): Fahrzeug betritt diesen Bereich nur,wenn kein anderer Verkehrsteilnehmer in diesem erkannt wird -![statemachines](../../00_assets/statemachines.png) +- Roter Bereich: Fahrzeug verlangsamt seine Geschwindigkeit +- Grüner Bereich: Fahrzeug kommt zum stehen +- Oranger Bereich (Intersection): Fahrzeug betritt diesen Bereich nur,wenn kein anderer Verkehrsteilnehmer in diesem erkannt wird +![statemachines](../../assets/statemachines.png) Aufteilung in mehrere state machines Eine state machine oder Aufteileung in mehrere state machines Vorteile von mehreren state machines: -* Geringere Berechnungszeit -* einfacher zu erstellen und Instand zu halten +- Geringere Berechnungszeit +- einfacher zu erstellen und Instand zu halten Nachteile von mehreren state machines: -* Sehr viele Regeln -* Regeln zwischen state machines können sich wiederholen +- Sehr viele Regeln +- Regeln zwischen state machines können sich wiederholen Reinforcement Learning, Rule based System, Markov Decision Process @@ -271,26 +260,26 @@ Probleme ist hierbei das Umplanen der Trajekotrie durch unerwartete Hindernisse Das Fahrzeug muss seine zukünftigen Aktionen, eigene Zustandsübergange, Zustandsübergänge anderer Agenten einbeziehen (zB. Umschalten einer Ampel). Es wird ein Input Vektor aus dem Bycicle Modell benötigt. 
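
The input vector mentioned above could come from a kinematic bicycle model; a hedged sketch (the wheelbase, step size and rear-axle reference point are assumptions, not the model used by the cited team):

```python
import math


def kinematic_bicycle_step(x, y, yaw, v, delta, a, wheelbase=2.9, dt=0.05):
    """One Euler integration step of a kinematic bicycle model.

    State: position (x, y), heading yaw and speed v; the input vector is
    the steering angle delta and the acceleration a.
    """
    x += v * math.cos(yaw) * dt
    y += v * math.sin(yaw) * dt
    yaw += v / wheelbase * math.tan(delta) * dt
    v += a * dt
    return x, y, yaw, v
```
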
-![berechnungsmodell](../../00_assets/berechnungsmodell.png) +![berechnungsmodell](../../assets/berechnungsmodell.png) Modell für die Berechnung der aktuellen und zukünftigen Fahrzeugposition -![trajektorienberechnung](../../00_assets/trajektorienberechnung.png) +![trajektorienberechnung](../../assets/trajektorienberechnung.png) Berechnung einer Trajektorie -![optimierungsvisualisierung](../../00_assets/optimierungsvisualisierung.png) +![optimierungsvisualisierung](../../assets/optimierungsvisualisierung.png) Visualisierung des Optimierungsprozesses bei der Trajektorienbildung -![trajekorienfehlermin](../../00_assets/trajekorienfehlermin.png) +![trajekorienfehlermin](../../assets/trajekorienfehlermin.png) Fehlerminimierung bei der Trajektorienberechnung ## Trajektorie Tracking -* Stanley Controller -* Pure Pursuit Controller +- Stanley Controller +- Pure Pursuit Controller ## Offene Fragen aus dem [Issue](https://github.com/ll7/paf22/issues/26) @@ -302,45 +291,45 @@ Dabei werden andere Fahrzeuge im näheren Umfeld des eigenen Fahrzeugs auch in d ### Eingang -* Fahrzeugposition -* Fahrzeugorientierung -* Fahrzeuggeschwindigkeit -* Fahrtrajektorie bzw anzufahrende Punkte aus denen trajektorie errechnet werden kann -* Objekte auf Trajektorie -* Ampelsignale und Verkehrsschilder -* Geschwindigkeitsbegrenzung -* Geschwindigkeit und Position anderer Verkehrsteilnehmer -* Target Lane +- Fahrzeugposition +- Fahrzeugorientierung +- Fahrzeuggeschwindigkeit +- Fahrtrajektorie bzw anzufahrende Punkte aus denen trajektorie errechnet werden kann +- Objekte auf Trajektorie +- Ampelsignale und Verkehrsschilder +- Geschwindigkeitsbegrenzung +- Geschwindigkeit und Position anderer Verkehrsteilnehmer +- Target Lane ### Ausgang -* "Actions" - * Bremsen - * Beschleunigen - * Halten - * Spurwechsel - * ... +- "Actions" + - Bremsen + - Beschleunigen + - Halten + - Spurwechsel + - ... Oder -* Sollgeschwindigkeit -* Lenkwinkel +- Sollgeschwindigkeit +- Lenkwinkel ### Wie sehen die Daten vom Leaderboard für das Global Planning aus "For each route, agents will be initialized at a starting point and directed to drive to a destination point, provided with a description of the route through GPS style coordinates, map coordinates and route instructions." -* GPS coordinates Beispiel: - * {'z': 0.0, 'lat': 48.99822669411668, 'lon': 8.002271601998707} -* Map/World coordinates Beispiel: - * {'x': 153.7, 'y': 15.6, 'z': 0.0} -* Route Instructions: - * RoadOption.CHANGELANELEFT: Move one lane to the left. - * RoadOption.CHANGELANERIGHT: Move one lane to the right. - * RoadOption.LANEFOLLOW: Continue in the current lane. - * RoadOption.LEFT: Turn left at the intersection. - * RoadOption.RIGHT: Turn right at the intersection. - * RoadOption.STRAIGHT: Keep straight at the intersection. +- GPS coordinates Beispiel: + - {'z': 0.0, 'lat': 48.99822669411668, 'lon': 8.002271601998707} +- Map/World coordinates Beispiel: + - {'x': 153.7, 'y': 15.6, 'z': 0.0} +- Route Instructions: + - RoadOption.CHANGELANELEFT: Move one lane to the left. + - RoadOption.CHANGELANERIGHT: Move one lane to the right. + - RoadOption.LANEFOLLOW: Continue in the current lane. + - RoadOption.LEFT: Turn left at the intersection. + - RoadOption.RIGHT: Turn right at the intersection. + - RoadOption.STRAIGHT: Keep straight at the intersection. "The distance between two consecutive waypoints could be up to hundreds of meters. Do not rely on these as your principal mechanism to navigate the environment." 
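
Because consecutive waypoints can be hundreds of meters apart, the global planner has to fill the gaps itself. A simple linear-interpolation sketch over (x, y) world coordinates like the ones shown above (the spacing value is an assumption):

```python
import math


def densify(waypoints, spacing=1.0):
    """Insert intermediate points between sparse (x, y) route waypoints."""
    dense = []
    for (x0, y0), (x1, y1) in zip(waypoints, waypoints[1:]):
        steps = max(1, int(math.hypot(x1 - x0, y1 - y0) / spacing))
        for i in range(steps):
            t = i / steps
            dense.append((x0 + t * (x1 - x0), y0 + t * (y1 - y0)))
    dense.append(waypoints[-1])
    return dense
```
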
@@ -352,9 +341,9 @@ Des Weiteren steh als globale Map ein OpenDRIVE file als String geparsed zur Ver [Beispiel 2021-2](#paf-2021-2): -* global_planner (Planung einer Route von einem Startpunkt zu einem oder einer Liste an Zielpunkten) - * Commonroad Route Planner (TUM) -> Liste an Routen-Lanelets sowie eine Liste an Punkten mit Abstand etwa 10cm - * (Anreicherung mit parallelen Spuren) +- global_planner (Planung einer Route von einem Startpunkt zu einem oder einer Liste an Zielpunkten) + - Commonroad Route Planner (TUM) -> Liste an Routen-Lanelets sowie eine Liste an Punkten mit Abstand etwa 10cm + - (Anreicherung mit parallelen Spuren) ### Wie sieht die Grenze zwischen global und local plan aus? @@ -372,12 +361,12 @@ Route deviation — If an agent deviates more than 30 meters from the assigned r ### Sollgeschwindigkeitsplanung -* Schilder - * vor Ampeln, Schildern, Kreisverkehren, Kreuzungen verzögern und langsamer werden -* Kurvenfahrt - * siehe [maximale Kurvengeschwindigkeit](#vehicle-controller) -* Vorausfahrendes Auto - * Geschwindigkeit an dieses Anpassen oder überholen wenn möglich -* Straßenverhältnisse - * "variety of situations: including freeways, urban areas, residential districts and rural settings" - * "variety of weather conditions: including daylight scenes, sunset, rain, fog, and night, among others" +- Schilder + - vor Ampeln, Schildern, Kreisverkehren, Kreuzungen verzögern und langsamer werden +- Kurvenfahrt + - siehe [maximale Kurvengeschwindigkeit](#vehicle-controller) +- Vorausfahrendes Auto + - Geschwindigkeit an dieses Anpassen oder überholen wenn möglich +- Straßenverhältnisse + - "variety of situations: including freeways, urban areas, residential districts and rural settings" + - "variety of weather conditions: including daylight scenes, sunset, rain, fog, and night, among others" diff --git a/doc/research/paf22/planning/decision_making.md b/doc/research/paf22/planning/decision_making.md new file mode 100644 index 00000000..70bc1cab --- /dev/null +++ b/doc/research/paf22/planning/decision_making.md @@ -0,0 +1,253 @@ +# Decision-making module + +**Summary:** This page gives a brief summary over possible decision-making choices their ad- and disadvantages as well as the opportunity to interchange them later on. Also, possible implementation options for those concepts are given. 
+ +- [Decision-making module](#decision-making-module) + - [Decision-making algorithms](#decision-making-algorithms) + - [Finite State machine](#finite-state-machine) + - [Advantages](#advantages) + - [Disadvantages](#disadvantages) + - [Markov Chain](#markov-chain) + - [Advantages](#advantages-1) + - [Disadvantages](#disadvantages-1) + - [Decision Tree](#decision-tree) + - [Advantages](#advantages-2) + - [Disadvantages](#disadvantages-2) + - [Previous approaches](#previous-approaches) + - [PAF21-1](#paf21-1) + - [State machine](#state-machine) + - [Take away](#take-away) + - [PAF21-2](#paf21-2) + - [No clear concept](#no-clear-concept) + - [Take away](#take-away-1) + - [PSAF1 2020](#psaf1-2020) + - [State machine](#state-machine-1) + - [Take away](#take-away-2) + - [PSAF2 2020](#psaf2-2020) + - [Decision tree](#decision-tree-1) + - [Take Away](#take-away-3) + - [Python or ROS libraries for these decision-making algorithms](#python-or-ros-libraries-for-these-decision-making-algorithms) + - [State machines](#state-machines) + - [SMACH](#smach) + - [SMACC](#smacc) + - [Markov Chains](#markov-chains) + - [QuantEcon](#quantecon) + - [markov\_decision\_making](#markov_decision_making) + - [Decision trees](#decision-trees) + - [pytrees](#pytrees) + - [Conclusion](#conclusion) + - [Sources](#sources) + +## Decision-making algorithms + +### Finite State machine + +A finite-state machine (FSM) or finite-state automaton (FSA, plural: automata), finite automaton, or simply a state machine, is a mathematical model of computation. +It is an abstract machine that can be in exactly one of a finite number of states at any given time. +The FSM can change from one state to another in response to some inputs; the change from one state to another is called a transition. +An FSM is defined by a list of its states, its initial state, and the inputs that trigger each transition. +Finite-state machines are of two types—deterministic finite-state machines and non-deterministic finite-state machines. A deterministic finite-state machine can be constructed equivalent to any non-deterministic one. + +#### Advantages + +- easy to implement +- we know most of the scenarios (finite state space) +- previous groups have solutions we could adapt/extend + +#### Disadvantages + +- many states necessary +- even though we can try to map all possible states, there still might be some situation we could not account for + +### Markov Chain + +A Markov chain or Markov process is a stochastic model describing a sequence of possible events in which the probability of each event depends only on the state attained in the previous event. +A countably infinite sequence, in which the chain moves state at discrete time steps, gives a discrete-time Markov chain. A continuous-time process is called a continuous-time Markov chain. It is named after the Russian mathematician Andrey Markov. + +#### Advantages + +- possible to build Markov Chain from State machine +- experience from previous projects +- only depends on current state ("memorylessness") + +#### Disadvantages + +- might be complicated to implement +- probabilities for transitions might need to be guessed, empirically estimated + +### Decision Tree + +A decision tree is a decision support tool that uses a tree-like model of decisions and their possible consequences, including chance event outcomes, resource costs, and utility. +It is one way to display an algorithm that only contains conditional control statements. 
Decision trees are commonly used in operations research, specifically in decision analysis, to help identify a strategy most likely to reach a goal, but are also a popular tool in machine learning.
+
+#### Advantages
+
+- easy implementation
+- tree-like structure usable in machine learning (e.g. Random Forest)
+
+#### Disadvantages
+
+- multiple decision trees necessary
+- prediction independent of previous state
+
+## Previous approaches
+
+### PAF21-1
+
+#### State machine
+
+- 2 state machines: one for maneuvers, one for speed control
+- Speed control more complex, when to brake seems like the most challenging task
+
+#### Take away
+
+- Some states seem to be comparable to what we are required to accomplish by the leaderboard
+- Our task might be more complex, needs additional states and transitions
+- I'm uncertain about an extra speed state, might be easier to handle that more locally by the local planner, maybe in combination with an observer element that keeps track of the surrounding by processing the information from `Perception`
+
+### PAF21-2
+
+#### No clear concept
+
+- some sort of state machine integrated in local planner
+- obstacle planner for dynamic obstacles (pedestrians, cars, bicycles)
+- useful parameters which we could adapt
+- path prediction for obstacles
+- obstacles are only interesting if they cross the path of the ego vehicle
+
+#### Take away
+
+- Obstacle planner might be useful for dynamic obstacle detection if not handled elsewhere
+- path prediction might reduce the number of objects tracked that we could interfere with
+- Also, if we adapt our local plan, this path prediction of other vehicles might come in handy
+- On the other hand, there is overhead to keep track of vehicles and maybe re-predict paths if some vehicles change direction
+
+### PSAF1 2020
+
+#### State machine
+
+- Three driving functions: Driving, stopping at traffic light, stopping at stop sign
+- First project iteration, so the state machine is simpler
+- still covers many important scenarios
+
+#### Take away
+
+- Good starting point to have a minimal viable state machine
+- Needs adaptation depending on what information we get forwarded/process in the planning module
+
+### PSAF2 2020
+
+#### Decision tree
+
+- This team used a decision tree to cover the major driving scenarios
+- Within the scenarios the actions are more linear
+- Reminds me of the execution of a state, where driving scenarios are the states and the execution is what our local planner should do within that state
+
+#### Take Away
+
+- Even though the approach is different, the execution might be similar to the other team algorithms
+- We might not be interested in a decision tree, as we want to keep the option to switch to a Markov chain, which would be more overhead if we start with a decision tree
+
+## Python or ROS libraries for these decision-making algorithms
+
+### State machines
+
+#### SMACH
+
+- Task-level architecture for creating state machines for robot behaviour.
+- Based on Python
+- Fast prototyping: Quickly create state machines
+- Complex state machines can easily be created
+- Introspection: smach_viewer provides a visual aid to follow the state machine executing its tasks
+  - smach_viewer is unmaintained and does not work with noetic
+- Allows nested state machines
+- Values can be passed between states
+- Tutorials and documentation seem to be easy to understand, so creating a first state machine shouldn't be too hard
+- working with several ROS topics and messages within the state machine needs to be evaluated:
+  - the execution of states is mostly planned to happen in the local planner, so for just sending a ROS message, SMACH might be efficient
+
+Do not use SMACH for:
+
+- Unstructured tasks: SMACH is not efficient in scheduling unstructured tasks
+- Low-level systems: SMACH is not built for high efficiency, might fall short for emergency maneuvers
+
+- Simple examples run without problem
+
+#### SMACC
+
+- event-driven, asynchronous, behavioral state machine library
+- real-time ROS applications
+- written in C++
+- designed to allow programmers to build robot control applications for multicomponent robots, in an intuitive and systematic manner.
+- well maintained, lots of prebuilt state machines to possibly start from
+
+Why not use SMACC:
+
+- might take some time to get back into C++
+- more sophisticated library might need more time to get used to
+- awful country music in the back of tutorial videos
+
+- Tutorials do not run without further debugging, which I didn't invest the time to do
+
+### Markov Chains
+
+#### QuantEcon
+
+- an economics library for implementing Markov chains
+- more focussed on simulation than actually using it in an AD agent
+- maybe usable for testing and simulating a Markov chain before implementing it
+
+#### markov_decision_making
+
+- ROS library for robot decision-making based on Markov Decision Problems
+- written in C++
+- callback-based action interpretation allows using other frameworks (SMACH)
+- relatively easy to implement hierarchical MDPs
+- supports synchronous and asynchronous execution
+
+Why not use markov_decision_making:
+
+- not maintained
+- only works with ROS hydro
+
+### Decision trees
+
+#### pytrees
+
+- easy framework for implementing behaviour trees
+- written in Python
+- used by a group two years ago
+- not usable for real-time application code according to their docs
+- priority handling - higher level interrupts are handled first
+
+## Conclusion
+
+In my opinion, a state machine would be a great start for the project. There are plenty of resources available from recent projects.
+It needs to be further discussed if the libraries presented above possess the needed functionality to run our state machine. The planning team might meet on the issue and present a suitable solution.
+It is possible to start with a skeleton of both and compare them.
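
To make the skeleton idea concrete, here is a minimal SMACH sketch with two invented states — the state names, outcomes and the stop-after-three-cycles logic exist purely for illustration:

```python
import smach


class Keep(smach.State):
    """Keep the current target speed until an update is needed."""

    def __init__(self):
        smach.State.__init__(self, outcomes=["update_needed", "done"])
        self.ticks = 0

    def execute(self, userdata):
        # Real logic would inspect perception/planning inputs here;
        # we simply stop after three cycles so the demo terminates.
        self.ticks += 1
        return "done" if self.ticks >= 3 else "update_needed"


class UpdateTargetSpeed(smach.State):
    """Set a new target speed, then hand control back to KEEP."""

    def __init__(self):
        smach.State.__init__(self, outcomes=["updated"])

    def execute(self, userdata):
        return "updated"


sm = smach.StateMachine(outcomes=["finished"])
with sm:
    smach.StateMachine.add("KEEP", Keep(),
                           transitions={"update_needed": "UPDATE_TARGET_SPEED",
                                        "done": "finished"})
    smach.StateMachine.add("UPDATE_TARGET_SPEED", UpdateTargetSpeed(),
                           transitions={"updated": "KEEP"})
outcome = sm.execute()
```
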
+
+### Sources
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/03_research/03_planning/00_paf22/07_reevaluation_desicion_making.md b/doc/research/paf22/planning/reevaluation_desicion_making.md
similarity index 65%
rename from doc/03_research/03_planning/00_paf22/07_reevaluation_desicion_making.md
rename to doc/research/paf22/planning/reevaluation_desicion_making.md
index a3fe7fb5..2e4eaa75 100644
--- a/doc/03_research/03_planning/00_paf22/07_reevaluation_desicion_making.md
+++ b/doc/research/paf22/planning/reevaluation_desicion_making.md
@@ -2,42 +2,24 @@

**Summary:** This page gives a foundation for the re-evaluation of the decision-making

----
-
-## Author
-
-Josef Kircher
-
-## Date
-
-26.01.2023
-
-## Prerequisite
-
----
-
-* [Re-evaluation of decision making component](#re-evaluation-of-decision-making-component)
-  * [**Summary:** This page gives a foundation for the re-evaluation of the decision-making](#summary-this-page-gives-a-foundation-for-the-re-evaluation-of-the-decision-making)
-  * [Author](#author)
-  * [Date](#date)
-  * [Prerequisite](#prerequisite)
-  * [Reasons for re-evaluation](#reasons-for-re-evaluation)
-  * [Options](#options)
-    * [Pylot](#pylot)
-    * [Pytrees](#pytrees)
-      * [Pros](#pros)
-      * [Cons](#cons)
-  * [Conclusion](#conclusion)
-  * [Sources](#sources)
-
+- [Re-evaluation of decision making component](#re-evaluation-of-decision-making-component)
+  - [Reasons for re-evaluation](#reasons-for-re-evaluation)
+  - [Options](#options)
+    - [Pylot](#pylot)
+    - [Pytrees](#pytrees)
+      - [Pros](#pros)
+      - [Cons](#cons)
+  - [Conclusion](#conclusion)
+  - [Sources](#sources)
+
## Reasons for re-evaluation

In the last sprint, I tried to get a graphic tool to work with the docker container within the project. That failed, but I still think that a graphical representation would be helpful.
Other reasons are:

-* not much time has been allocated for the state machine so far
-* using SMACH would result in a mostly from scratch implementation
-* harder to debug due to the lack of a graphic representation
+- not much time has been allocated for the state machine so far
+- using SMACH would result in a mostly from-scratch implementation
+- harder to debug due to the lack of a graphic representation

## Options

@@ -56,18 +38,18 @@ As it is looking very promising, I list here a few arguments to help support my

#### Pros

-* support a graphical representation at runtime with rqt
-* a lot of similar driving scenarios as the old team
-* so a lot of code can be recycled
-* quite intuitive and easy to understand
-* only a limited amount of commands (easy to learn)
-* well documented
-* maintained
+- supports a graphical representation at runtime with rqt
+- a lot of driving scenarios similar to the old team's
+- so a lot of code can be recycled
+- quite intuitive and easy to understand
+- only a limited number of commands (easy to learn)
+- well documented
+- maintained

#### Cons

-* only a couple of decision can be made inside the tree, so it might be more complicated to depict the complex behaviour of the ego vehicle
-* A lot of time was invested in the design of the original state machine, might be needed to be adapted
+- only a couple of decisions can be made inside the tree, so it might be more complicated to depict the complex behaviour of the ego vehicle
+- A lot of time was invested in the design of the original state machine, which might need to be adapted

## Conclusion

diff --git a/doc/03_research/03_planning/00_paf22/06_state_machine_design.md b/doc/research/paf22/planning/state_machine_design.md
similarity index 64%
rename from doc/03_research/03_planning/00_paf22/06_state_machine_design.md
rename to doc/research/paf22/planning/state_machine_design.md
index e90a785f..2bf3ad2b 100644
--- a/doc/03_research/03_planning/00_paf22/06_state_machine_design.md
+++ b/doc/research/paf22/planning/state_machine_design.md
@@ -2,67 +2,51 @@

**Summary:** This page gives an overview of the design of the state machine and further describes states and transitions.
---- - -## Author - -Josef Kircher - -## Date - -09.12.2022 - ---- - -* [Title of wiki page](#title-of-wiki-page) - * [Author](#author) - * [Date](#date) - * [Super state machine](#super-state-machine) - * [Driving state machine](#driving-state-machine) - * [KEEP](#keep) - * [ACCEL](#accel) - * [Brake](#brake) - * [Lane change state machine](#lane-change-state-machine) - * [DECIDE_LANE_CHANGE](#decidelanechange) - * [CHANGE_LANE_LEFT](#changelaneleft) - * [CHANGE_LANE_RIGHT](#changelaneright) - * [Intersection state machine](#intersection-state-machine) - * [APPROACH_INTERSECTION](#approachintersection) - * [IN_INTERSECTION](#inintersection) - * [TURN_LEFT](#turnleft) - * [STRAIGHT](#straight) - * [TURN_RIGHT](#turnright) - * [LEAVE_INTERSECTION](#leaveintersection) - * [Stop sign/traffic light state machine](#stop-signtraffic-light-state-machine) - * [STOP_NEAR](#stopnear) - * [STOP_SLOW_DOWN](#stopslowdown) - * [STOP_WILL_STOP](#stopwillstop) - * [STOP_WAIT](#stopwait) - * [STOP_GO](#stopgo) - * [Implementation](#implementation) - * [Sources](#sources) - +- [State machine design](#state-machine-design) + - [Super state machine](#super-state-machine) + - [Driving state machine](#driving-state-machine) + - [KEEP](#keep) + - [UPDATE\_TARGET\_SPEED](#update_target_speed) + - [Lane change state machine](#lane-change-state-machine) + - [DECIDE\_LANE\_CHANGE](#decide_lane_change) + - [CHANGE\_LANE\_LEFT](#change_lane_left) + - [CHANGE\_LANE\_RIGHT](#change_lane_right) + - [Intersection state machine](#intersection-state-machine) + - [APPROACH\_INTERSECTION](#approach_intersection) + - [IN\_INTERSECTION](#in_intersection) + - [TURN\_LEFT](#turn_left) + - [STRAIGHT](#straight) + - [TURN\_RIGHT](#turn_right) + - [LEAVE\_INTERSECTION](#leave_intersection) + - [Stop sign/traffic light state machine](#stop-signtraffic-light-state-machine) + - [STOP\_NEAR](#stop_near) + - [STOP\_SLOW\_DOWN](#stop_slow_down) + - [STOP\_WILL\_STOP](#stop_will_stop) + - [STOP\_WAIT](#stop_wait) + - [STOP\_GO](#stop_go) + - [Implementation](#implementation) + - [Sources](#sources) ## Super state machine -![img.png](../../00_assets/Super_SM.png) +![img.png](../../assets/Super_SM.png) The super state machine functions as a controller of the main functions of the agent. Those functions are -* following the road and brake in front of obstacles if needed -* drive across an intersection -* change lane +- following the road and brake in front of obstacles if needed +- drive across an intersection +- change lane ## Driving state machine -![img.png](../../00_assets/Driving_SM.png) +![img.png](../../assets/Driving_SM.png) Transition: -* From `Intersection state machine` -* From `Lane change state machine` +- From `Intersection state machine` +- From `Lane change state machine` This state machine controls the speed of the ego-vehicle. It either tells the acting part of the ego vehicle to `UPDATE_TARGET_SPEED` or `KEEP` the velocity. @@ -74,7 +58,7 @@ If there is an event requiring the ego-vehicle to change the lane as mentioned i Transition: -* From `UPDATE_TARGET_SPEED` +- From `UPDATE_TARGET_SPEED` Keep the current target speed, applied most of the time. From here changes to the `UPDATE_TARGET_SPEED` state are performed, if events require a change of `target_speed`. @@ -82,36 +66,36 @@ Keep the current target speed, applied most of the time. From here changes to th Transition: -* From `KEEP` if `new target_speed` is smaller or greater than current `target_speed` or an `obstacle` or the `leading_vehicle` is in braking distance. 
+- From `KEEP` if `new target_speed` is smaller or greater than current `target_speed` or an `obstacle` or the `leading_vehicle` is in braking distance.

Set a new target speed and change back to `KEEP` state afterwards.

## Lane change state machine

-![img.png](../../00_assets/Lane_Change_SM.png)
+![img.png](../../assets/Lane_Change_SM.png)

Transition:

-* From `driving state machine` by `lane_change_requested`
+- From `driving state machine` by `lane_change_requested`

This state machine completes the change of a lane. This is triggered from the super state machine and can have multiple triggers.

Those include:

-* Join highway
-* Leave highway
-* RoadOption:
-  * CHANGELANELEFT
-  * CHANGELANERIGHT
-  * KEEPLANE
-* avoid obstacle(doors, static objects)
-* give way to emergency vehicle
-* overtake slow moving vehicle
-* leave a parking bay
+- Join highway
+- Leave highway
+- RoadOption:
+  - CHANGELANELEFT
+  - CHANGELANERIGHT
+  - KEEPLANE
+- avoid obstacles (doors, static objects)
+- give way to emergency vehicles
+- overtake a slow-moving vehicle
+- leave a parking bay

### DECIDE_LANE_CHANGE

Transition:

-* From super state machine by above triggers
+- From super state machine by above triggers

From the super state machine the transition to change the lane is given by one of the above triggers. This state decides to which lane should be changed, depending on the trigger.
It takes into account if there are lanes to the left and/or right and if the lane change is requested by a roadOption command.

@@ -120,7 +104,7 @@ It takes into account if there are lanes to the left and/or right and if the lan

Transition:

-* From `DECIDE_LANE_CHANGE` by `RoadOption.CHANGELANELEFT` or `obstacle_in_lane` or `leader_vehicle_speed < LEADERTHRESHOLD`
+- From `DECIDE_LANE_CHANGE` by `RoadOption.CHANGELANELEFT` or `obstacle_in_lane` or `leader_vehicle_speed < LEADERTHRESHOLD`

This state performs a lane change to the lane on the left.

@@ -134,8 +118,8 @@ If an obstacle or a slow leading vehicle are the reasons for the lane change, to

Transition:

-* From `DECIDE_LANE_CHANGE` by `RoadOption.CHANGELANERIGHT` or `emergency_vehicle_in_front`
-* From `CHANGE_LANE_LEFT` by `passing_obstacle` or `slow_leading_vehicle`
+- From `DECIDE_LANE_CHANGE` by `RoadOption.CHANGELANERIGHT` or `emergency_vehicle_in_front`
+- From `CHANGE_LANE_LEFT` by `passing_obstacle` or `slow_leading_vehicle`

For changing to the right lane it is assumed that the traffic in this lane flows in the driving direction of the ego vehicle.

@@ -143,11 +127,11 @@ The lane change should be performed if the lane is free and there are no fast mo

## Intersection state machine

-![img.png](../../00_assets/Intersection_SM.png)
+![img.png](../../assets/Intersection_SM.png)

Transition:

-* From `driving state machine` by `intersection_detected`
+- From `driving state machine` by `intersection_detected`

This state machine handles the passing of an intersection.

If there is a traffic light or a stop sign at the intersection, change to the
@@ -163,8 +147,8 @@

Transition:

-* From `STOP_SIGN/TRAFFIC SM` by `clearing the traffic light, stop sign`
-* From `APPROACH_INTERSECTION` by `detecting an unsignalized and cleared intersection`
+- From `STOP_SIGN/TRAFFIC SM` by `clearing the traffic light, stop sign`
+- From `APPROACH_INTERSECTION` by `detecting an unsignalized and cleared intersection`

After approaching the intersection and clearing a possible traffic light/stop sign, the ego vehicle enters the intersection.
@@ -174,7 +158,7 @@ From there the RoadOption decides in which direction the ego vehicle should turn Transition: -* From `IN_INTERSECTION` by `RoadOption.LEFT` +- From `IN_INTERSECTION` by `RoadOption.LEFT` Check for pedestrians on the driving path. If the path is clear of pedestrians, make sure there will be no crashes during the turning process with oncoming traffic. @@ -182,7 +166,7 @@ Check for pedestrians on the driving path. If the path is clear of pedestrians, Transition: -* From `IN_INTERSECTION` by `RoadOption.STRAIGHT` +- From `IN_INTERSECTION` by `RoadOption.STRAIGHT` Check if there is a vehicle running a red light in the intersection. Pass the intersection. @@ -190,7 +174,7 @@ Check if there is a vehicle running a red light in the intersection. Pass the in Transition: -* From `IN_INTERSECTION` by `RoadOption.RIGHT` +- From `IN_INTERSECTION` by `RoadOption.RIGHT` Check for pedestrians on the driving path. If the path is clear of pedestrians, make sure there will be no crashes during the turning process with crossing traffic. @@ -198,15 +182,15 @@ Check for pedestrians on the driving path. If the path is clear of pedestrians, Transition: -* From `TURN_RIGHT`, `STRAIGHT` or `TURN_LEFT` by passing a distance from the intersection. +- From `TURN_RIGHT`, `STRAIGHT` or `TURN_LEFT` by passing a distance from the intersection. ## Stop sign/traffic light state machine -![img.png](../../00_assets/Traffic_SM.png) +![img.png](../../assets/Traffic_SM.png) Transition: -* From `APPROACH_INTERSECTION` by `stop_sign_detected or traffic_light_detected` +- From `APPROACH_INTERSECTION` by `stop_sign_detected or traffic_light_detected` This state machine handles stop signs and traffic lights. @@ -218,7 +202,7 @@ If the traffic light/stop sign is near, reduce speed. Avoid crashes with slowly Transitions: -* From `STOP_NEAR` if `distance greater braking distance`. +- From `STOP_NEAR` if `distance greater braking distance`. Slow down near the traffic light to be able to react to quick changes. @@ -226,10 +210,10 @@ Slow down near the traffic light to be able to react to quick changes. Transition: -* From `STOP_NEAR` if `distance < braking distance` while sensing a traffic_light that is `red` or `yellow` or a `stop sign` -* From `STOP_SLOW_DOWN` if `distance < braking distance` -* From `STOP_GO` if the traffic light changes from `green` to `yellow` or `red` and the ego vehicle can stop in front of the stop sign/traffic light. -* From `STOP_WAIT` if the there is a predominant stop sign and the ego vehicle didn't reach the stop line. +- From `STOP_NEAR` if `distance < braking distance` while sensing a traffic_light that is `red` or `yellow` or a `stop sign` +- From `STOP_SLOW_DOWN` if `distance < braking distance` +- From `STOP_GO` if the traffic light changes from `green` to `yellow` or `red` and the ego vehicle can stop in front of the stop sign/traffic light. +- From `STOP_WAIT` if there is a predominant stop sign and the ego vehicle didn't reach the stop line. Stop in front of the traffic light or the stop sign. @@ -237,7 +221,7 @@ Stop in front of the traffic light or the stop sign. Transition: -* From `STOP_WILL_STOP` by either vehicle has stopped or distance to stop line is less than 2 meters +- From `STOP_WILL_STOP` by either vehicle has stopped or distance to stop line is less than 2 meters The vehicle has stopped and waits either until the leading vehicle continues to drive or traffic rules permit it to continue driving.
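The `STOP_NEAR` transitions listed above condense into a small decision function. A minimal sketch, assuming the documented distance/braking-distance comparison and light states; the function name, thresholds, and the stop-sign handling are illustrative, not project code.

```python
# Minimal sketch of the STOP_NEAR transitions documented above.
# State names follow the documentation; the function, thresholds and
# the string-based light encoding are assumptions for illustration.
from enum import Enum, auto


class StopState(Enum):
    STOP_NEAR = auto()
    STOP_SLOW_DOWN = auto()
    STOP_WILL_STOP = auto()
    STOP_WAIT = auto()
    STOP_GO = auto()


def transition_from_stop_near(distance: float, braking_distance: float,
                              light: str) -> StopState:
    # Green or deactivated traffic light: accelerate and clear it.
    if light in ("green", "off"):
        return StopState.STOP_GO
    # Still outside braking distance: slow down and keep observing.
    if distance > braking_distance:
        return StopState.STOP_SLOW_DOWN
    # Inside braking distance with red/yellow (a stop sign is treated
    # like a red light in this sketch): prepare to stop.
    return StopState.STOP_WILL_STOP


assert transition_from_stop_near(40.0, 25.0, "red") is StopState.STOP_SLOW_DOWN
assert transition_from_stop_near(10.0, 25.0, "red") is StopState.STOP_WILL_STOP
assert transition_from_stop_near(10.0, 25.0, "green") is StopState.STOP_GO
```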
@@ -245,9 +229,9 @@ The vehicle has stopped and waits either until the leading vehicle continues to drive Transition: -* From `STOP_NEAR` if traffic light is `green` or `off` -* From `STOP_SLOW_DOWN` if traffic light is `green` or `off` -* FROM `STOP_WAIT` if traffic light is `green` or `off` +- From `STOP_NEAR` if traffic light is `green` or `off` +- From `STOP_SLOW_DOWN` if traffic light is `green` or `off` +- From `STOP_WAIT` if traffic light is `green` or `off` Ego vehicle starts to accelerate to clear the traffic sign/traffic light or continues to drive if the traffic light is green or deactivated. diff --git a/doc/research/paf22/requirements/README.md b/doc/research/paf22/requirements/README.md new file mode 100644 index 00000000..28fce181 --- /dev/null +++ b/doc/research/paf22/requirements/README.md @@ -0,0 +1,7 @@ +# Requirements + +This folder contains all the results of our research on requirements: + +- [Leaderboard information](./informations_from_leaderboard.md) +- [Requirements for agent](./requirements.md) +- [Use case scenarios](./use_cases.md) diff --git a/doc/03_research/04_requirements/02_informations_from_leaderboard.md b/doc/research/paf22/requirements/informations_from_leaderboard.md similarity index 61% rename from doc/03_research/04_requirements/02_informations_from_leaderboard.md rename to doc/research/paf22/requirements/informations_from_leaderboard.md index 137d3566..9b47caa2 100644 --- a/doc/03_research/04_requirements/02_informations_from_leaderboard.md +++ b/doc/research/paf22/requirements/informations_from_leaderboard.md @@ -2,69 +2,46 @@ **Summary:** This page contains the project information from the CARLA leaderboard. A more specific summary will follow once the page is finished. ---- - -## Author - -Josef Kircher - -## Date - -17.11.2022 - -## Prerequisite - -none - ---- - - -* [Requirements of Carla Leaderboard](#requirements-of-carla-leaderboard) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [Task](#task) - * [Participation modalities](#participation-modalities) - * [Route format](#route-format) - * [Sensors](#sensors) - * [Evaluation](#evaluation) - * [Main score](#main-score) - * [Driving Score for route i](#driving-score-for-route-i) - * [Infraction penalty](#infraction-penalty) - * [Shutdown criteria](#shutdown-criteria) - * [Submission](#submission) - * [Sources](#sources) - - ---- +- [Requirements of Carla Leaderboard](#requirements-of-carla-leaderboard) + - [Task](#task) + - [Participation modalities](#participation-modalities) + - [Route format](#route-format) + - [Sensors](#sensors) + - [Evaluation](#evaluation) + - [Main score](#main-score) + - [Driving score for single route](#driving-score-for-single-route) + - [Infraction penalty](#infraction-penalty) + - [Shutdown criteria](#shutdown-criteria) + - [Submission](#submission) + - [Sources](#sources) ## Task -* an autonomous agent should drive through a set of predefined routes -* for each route: - * initialization at a starting point - * directed to drive to a destination point - * route described by GPS coordinates **or** map coordinates **or** route instructions -* route situations: - * freeways - * urban areas - * residential districts - * rural settings -* weather conditions: - * daylight - * sunset - * rain - * fog - * night - * more ...
+- an autonomous agent should drive through a set of predefined routes +- for each route: + - initialization at a starting point + - directed to drive to a destination point + - route described by GPS coordinates **or** map coordinates **or** route instructions +- route situations: + - freeways + - urban areas + - residential districts + - rural settings +- weather conditions: + - daylight + - sunset + - rain + - fog + - night + - more ... Possible traffic signs (not complete): -* Stop sign -* Speed limitation -* Traffic lights -* Arrows on street -* Stop sign on street +- Stop sign +- Speed limitation +- Traffic lights +- Arrows on street +- Stop sign on street ## Participation modalities @@ -100,12 +77,12 @@ Second, world coordinates and a route option High-level commands (road options) are: -* RoadOption.**CHANGELANELEFT**: Move one lane to the left. -* RoadOption.**CHANGELANERIGHT**: Move one lane to the right. -* RoadOption.**LANEFOLLOW**: Continue in the current lane. -* RoadOption.**LEFT**: Turn left at the intersection. -* RoadOption.**RIGHT**: Turn right at the intersection. -* RoadOption.**STRAIGHT**: Keep straight at the intersection. +- RoadOption.**CHANGELANELEFT**: Move one lane to the left. +- RoadOption.**CHANGELANERIGHT**: Move one lane to the right. +- RoadOption.**LANEFOLLOW**: Continue in the current lane. +- RoadOption.**LEFT**: Turn left at the intersection. +- RoadOption.**RIGHT**: Turn right at the intersection. +- RoadOption.**STRAIGHT**: Keep straight at the intersection. **Important:** If the semantics of left and right are ambiguous, the next position should be used to clarify the path. @@ -131,9 +108,9 @@ Determines how "good" the agent performs on the Leaderboard. The driving proficiency of an agent can be characterized by multiple metrics. -* `Driving score:` Product between route completion and infractions penalty -* `Route completion:` Percentage of the route distance completed by an agent -* `Infraction penalty:` The leaderboard tracks several types of infractions which reduce the score +- `Driving score:` Product between route completion and infractions penalty +- `Route completion:` Percentage of the route distance completed by an agent +- `Infraction penalty:` The leaderboard tracks several types of infractions which reduce the score Every agent starts with a base infraction score of 1.0 at the beginning. @@ -147,36 +124,36 @@ Product of route completion and infraction penalty of this route Not complying with traffic rules will result in a penalty. Multiple penalties can be applied per route. Infractions ordered by severity are: -* collisions with pedestrians: 0.50 -* collisions with other vehicles: 0.60 -* collisions with static elements: 0.65 -* running a red light: 0.70 -* running a stop sign: 0.80 +- collisions with pedestrians: 0.50 +- collisions with other vehicles: 0.60 +- collisions with static elements: 0.65 +- running a red light: 0.70 +- running a stop sign: 0.80 It is possible that the vehicle is stuck in some scenario. After a timeout of **4 minutes** the vehicle will be released; however, a penalty is applied -* scenario timeout (feature behaviours can block ego vehicle): 0.70 +- scenario timeout (feature behaviours can block ego vehicle): 0.70 Agent should keep a minimum speed compared to the nearby traffic. The penalty increases with the difference in speed. -* Failure to maintain minimum speed: 0.70 +- Failure to maintain minimum speed: 0.70 Agent should let emergency vehicles from behind pass.
-* Failure to yield to emergency vehicle: 0.70 +- Failure to yield to emergency vehicle: 0.70 If the agent drives off-road, that percentage does not count towards the route completion -* Off-road driving: not considered towards the computation of the route completion score +- Off-road driving: not considered towards the computation of the route completion score ### Shutdown criteria Some events will interrupt the simulation, resulting in an incomplete route -* route deviation - more than 30 meters from assigned route -* agent blocked - if agent does not take an action for 180 seconds -* simulation timeout - no client-server communication in 60 seconds -* route timeout - simulation takes too long to finish +- route deviation - more than 30 meters from assigned route +- agent blocked - if agent does not take an action for 180 seconds +- simulation timeout - no client-server communication in 60 seconds +- route timeout - simulation takes too long to finish ## Submission diff --git a/doc/research/paf22/requirements/requirements.md b/doc/research/paf22/requirements/requirements.md new file mode 100644 index 00000000..9cd7ca46 --- /dev/null +++ b/doc/research/paf22/requirements/requirements.md @@ -0,0 +1,63 @@ +# Requirements + +**Summary:** This page contains the requirements obtained from the Carla Leaderboard website as well as former projects in the `Praktikum Autonomes Fahren` + +- [Requirements](#requirements) + - [Requirements from Leaderboard tasks](#requirements-from-leaderboard-tasks) + - [Prioritized driving aspects](#prioritized-driving-aspects) + - [more Content](#more-content) + - [Sources](#sources) + +## Requirements from Leaderboard tasks + +- follow waypoints on a route +- don't deviate from route by more than 30 meters +- act in accordance with traffic rules +- don't get blocked +- complete 10 routes (2 weather conditions) + +--- + +## Prioritized driving aspects + +There are different ways to prioritize the driving aspects mentioned in the document [use_cases](https://github.com/ll7/paf22/blob/482c1f5a201b52276d7b77cf402009bd99c93317/doc/research/use_cases.md). +The most important topics, in relation to this project, are the driving score and the safety aspect. +Also, it is appropriate to implement the basic features of an autonomous car first. The list is a mixture of the different approaches, prioritizing from very important functionalities to less important features. + +`Very important:` + +- Recognize the street limitations +- Recognize pedestrians +- Follow the waypoints +- Recognize traffic lights +- Recognize obstacles +- Recognize cars in front of the agent (keep distance) +- Steering, accelerate, decelerate +- Street rules (no street signs available) +- Change lane (obstacles) + +`Important:` + +- Check Intersection +- Sense traffic (speed and trajectory) +- Predict traffic +- Emergency brake +- Sense length of ramp +- Recognize space (Turn into highway) +- Change lane (safe) +- Recognize emergency vehicle +- Recognize unexpected dynamic situations (opening door, bicycles, ...)
+ +`Less important:` + +- Smooth driving (accelerate, decelerate, stop) +- Weather Condition +- Predict pedestrians + +--- + +## more Content + +### Sources + + diff --git a/doc/03_research/04_requirements/04_use_cases.md b/doc/research/paf22/requirements/use_cases.md similarity index 67% rename from doc/03_research/04_requirements/04_use_cases.md rename to doc/research/paf22/requirements/use_cases.md index 59cd6984..984d23e9 100644 --- a/doc/03_research/04_requirements/04_use_cases.md +++ b/doc/research/paf22/requirements/use_cases.md @@ -2,54 +2,164 @@ **Summary:** This page contains a set of possible use cases containing a description of the scenario, the functions the agent has to have to pass that scenario as well as the event triggering that use case, the flow through that use case and the outcome. ---- - -## Author - -Josef Kircher - -## Date - -21.11.2022 - -## Prerequisite - ---- - -* [Use cases in Carla Leaderboard](#use-cases-in-carla-leaderboard) - * [Author](#author) - * [Date](#date) - * [Prerequisite](#prerequisite) - * [1. Control loss due to bad road condition](#1-control-loss-due-to-bad-road-condition) - * [2. Unprotected left turn at intersection with oncoming traffic](#2-unprotected-left-turn-at-intersection-with-oncoming-traffic) - * [3. Right turn at an intersection with crossing traffic](#3-right-turn-at-an-intersection-with-crossing-traffic) - * [4. Crossing negotiation at unsignalized intersection](#4-crossing-negotiation-at-unsignalized-intersection) - * [5. Crossing traffic running a red light at intersection](#5-crossing-traffic-running-a-red-light-at-intersection) - * [6. Highway merge from on-ramp](#6-highway-merge-from-on-ramp) - * [7. Highway cut-in from on-ramp](#7-highway-cut-in-from-on-ramp) - * [8. Static cut-in](#8-static-cut-in) - * [9. Highway exit](#9-highway-exit) - * [10. Yield to emergency vehicle](#10-yield-to-emergency-vehicle) - * [11. Obstacle in lane](#11-obstacle-in-lane) - * [12. Door Obstacle](#12-door-obstacle) - * [13. Slow moving hazard at lane edge](#13-slow-moving-hazard-at-lane-edge) - * [14. Vehicle invading lane on bend](#14-vehicle-invading-lane-on-bend) - * [15. Longitudinal control after leading vehicle brakes](#15-longitudinal-control-after-leading-vehicle-brakes) - * [16. Obstacle avoidance without prior action](#16-obstacle-avoidance-without-prior-action) - * [17. Pedestrian emerging from behind parked vehicle](#17-pedestrian-emerging-from-behind-parked-vehicle) - * [18. Obstacle avoidance with prior action](#18-obstacle-avoidance-with-prior-action) - * [19. Parking Cut-in](#19-parking-cut-in) - * [20. Lane changing to evade slow leading vehicle](#20-lane-changing-to-evade-slow-leading-vehicle) - * [21. Passing obstacle with oncoming traffic](#21-passing-obstacle-with-oncoming-traffic) - * [22. Parking Exit](#22-parking-exit) - * [Sources](#sources) - - ---- +- [Use cases in Carla Leaderboard](#use-cases-in-carla-leaderboard) + - [1. Control loss due to bad road condition](#1-control-loss-due-to-bad-road-condition) + - [Description](#description) + - [Pre-condition(Event)](#pre-conditionevent) + - [Driving functions](#driving-functions) + - [Outcome](#outcome) + - [Associated use cases](#associated-use-cases) + - [2. 
Unprotected left turn at intersection with oncoming traffic](#2-unprotected-left-turn-at-intersection-with-oncoming-traffic) + - [Description](#description-1) + - [Basic flow](#basic-flow) + - [Pre-condition(Event)](#pre-conditionevent-1) + - [Driving functions](#driving-functions-1) + - [Outcome](#outcome-1) + - [Associated use cases](#associated-use-cases-1) + - [3. Right turn at an intersection with crossing traffic](#3-right-turn-at-an-intersection-with-crossing-traffic) + - [Description](#description-2) + - [Basic flow](#basic-flow-1) + - [Pre-condition(Event)](#pre-conditionevent-2) + - [Driving functions](#driving-functions-2) + - [Outcome](#outcome-2) + - [Associated use cases](#associated-use-cases-2) + - [4. Crossing negotiation at unsignalized intersection](#4-crossing-negotiation-at-unsignalized-intersection) + - [Description](#description-3) + - [Basic flow](#basic-flow-2) + - [Pre-condition(Event)](#pre-conditionevent-3) + - [Driving functions](#driving-functions-3) + - [Outcome](#outcome-3) + - [Associated use cases](#associated-use-cases-3) + - [5. Crossing traffic running a red light at intersection](#5-crossing-traffic-running-a-red-light-at-intersection) + - [Description](#description-4) + - [Pre-condition(Event)](#pre-conditionevent-4) + - [Driving functions](#driving-functions-4) + - [Outcome](#outcome-4) + - [Associated use cases](#associated-use-cases-4) + - [6. Highway merge from on-ramp](#6-highway-merge-from-on-ramp) + - [Description](#description-5) + - [Basic flow](#basic-flow-3) + - [Pre-condition(Event)](#pre-conditionevent-5) + - [Driving functions](#driving-functions-5) + - [Outcome](#outcome-5) + - [Associated use cases](#associated-use-cases-5) + - [7. Highway cut-in from on-ramp](#7-highway-cut-in-from-on-ramp) + - [Description](#description-6) + - [Basic flow](#basic-flow-4) + - [Pre-condition(Event)](#pre-conditionevent-6) + - [Driving functions](#driving-functions-6) + - [Outcome](#outcome-6) + - [Associated use cases](#associated-use-cases-6) + - [8. Static cut-in](#8-static-cut-in) + - [Description](#description-7) + - [Basic flow](#basic-flow-5) + - [Pre-condition(Event)](#pre-conditionevent-7) + - [Driving functions](#driving-functions-7) + - [Outcome](#outcome-7) + - [Associated use cases](#associated-use-cases-7) + - [9. Highway exit](#9-highway-exit) + - [Description](#description-8) + - [Basic flow](#basic-flow-6) + - [Pre-condition(Event)](#pre-conditionevent-8) + - [Driving functions](#driving-functions-8) + - [Outcome](#outcome-8) + - [Associated use cases](#associated-use-cases-8) + - [10. Yield to emergency vehicle](#10-yield-to-emergency-vehicle) + - [Description](#description-9) + - [Basic flow](#basic-flow-7) + - [Pre-condition(Event)](#pre-conditionevent-9) + - [Driving functions](#driving-functions-9) + - [Outcome](#outcome-9) + - [Associated use cases](#associated-use-cases-9) + - [11. Obstacle in lane](#11-obstacle-in-lane) + - [Description](#description-10) + - [Basic flow](#basic-flow-8) + - [Pre-condition(Event)](#pre-conditionevent-10) + - [Driving functions](#driving-functions-10) + - [Outcome](#outcome-10) + - [Associated use cases](#associated-use-cases-10) + - [12. Door Obstacle](#12-door-obstacle) + - [Description](#description-11) + - [Basic flow](#basic-flow-9) + - [Pre-condition(Event)](#pre-conditionevent-11) + - [Driving functions](#driving-functions-11) + - [Outcome](#outcome-11) + - [Associated use cases](#associated-use-cases-11) + - [13. 
Slow moving hazard at lane edge](#13-slow-moving-hazard-at-lane-edge) + - [Description](#description-12) + - [Basic flow](#basic-flow-10) + - [Pre-condition(Event)](#pre-conditionevent-12) + - [Driving functions](#driving-functions-12) + - [Outcome](#outcome-12) + - [Associated use cases](#associated-use-cases-12) + - [14. Vehicle invading lane on bend](#14-vehicle-invading-lane-on-bend) + - [Description](#description-13) + - [Basic flow](#basic-flow-11) + - [Pre-condition(Event)](#pre-conditionevent-13) + - [Driving functions](#driving-functions-13) + - [Outcome](#outcome-13) + - [Associated use cases](#associated-use-cases-13) + - [15. Longitudinal control after leading vehicle brakes](#15-longitudinal-control-after-leading-vehicle-brakes) + - [Description](#description-14) + - [Basic flow](#basic-flow-12) + - [Pre-condition(Event)](#pre-conditionevent-14) + - [Driving functions](#driving-functions-14) + - [Outcome](#outcome-14) + - [Associated use cases](#associated-use-cases-14) + - [16. Obstacle avoidance without prior action](#16-obstacle-avoidance-without-prior-action) + - [Description](#description-15) + - [Basic flow](#basic-flow-13) + - [Pre-condition(Event)](#pre-conditionevent-15) + - [Driving functions](#driving-functions-15) + - [Outcome](#outcome-15) + - [Associated use cases](#associated-use-cases-15) + - [17. Pedestrian emerging from behind parked vehicle](#17-pedestrian-emerging-from-behind-parked-vehicle) + - [Description](#description-16) + - [Basic flow](#basic-flow-14) + - [Pre-condition(Event)](#pre-conditionevent-16) + - [Driving functions](#driving-functions-16) + - [Outcome](#outcome-16) + - [Associated use cases](#associated-use-cases-16) + - [18. Obstacle avoidance with prior action](#18-obstacle-avoidance-with-prior-action) + - [Description](#description-17) + - [Basic flow](#basic-flow-15) + - [Pre-condition(Event)](#pre-conditionevent-17) + - [Driving functions](#driving-functions-17) + - [Outcome](#outcome-17) + - [Associated use cases](#associated-use-cases-17) + - [19. Parking Cut-in](#19-parking-cut-in) + - [Description](#description-18) + - [Basic flow](#basic-flow-16) + - [Pre-condition(Event)](#pre-conditionevent-18) + - [Driving functions](#driving-functions-18) + - [Outcome](#outcome-18) + - [Associated use cases](#associated-use-cases-18) + - [20. Lane changing to evade slow leading vehicle](#20-lane-changing-to-evade-slow-leading-vehicle) + - [Description](#description-19) + - [Basic flow](#basic-flow-17) + - [Pre-condition(Event)](#pre-conditionevent-19) + - [Driving functions](#driving-functions-19) + - [Outcome](#outcome-19) + - [Associated use cases](#associated-use-cases-19) + - [21. Passing obstacle with oncoming traffic](#21-passing-obstacle-with-oncoming-traffic) + - [Description](#description-20) + - [Basic flow](#basic-flow-18) + - [Pre-condition(Event)](#pre-conditionevent-20) + - [Driving functions](#driving-functions-20) + - [Outcome](#outcome-20) + - [Associated use cases](#associated-use-cases-20) + - [22. Parking Exit](#22-parking-exit) + - [Description](#description-21) + - [Basic flow](#basic-flow-19) + - [Pre-condition(Event)](#pre-conditionevent-21) + - [Driving functions](#driving-functions-21) + - [Outcome](#outcome-21) + - [Associated use cases](#associated-use-cases-21) + - [Sources](#sources) ## 1. 
Control loss due to bad road condition -![img](../../00_assets/TR01.png) +![img](../../assets/TR01.png) ### Description @@ -61,9 +171,9 @@ Loss of control ### Driving functions -* Control steering angle, throttle and brake to counter unexpected movements +- Control steering angle, throttle and brake to counter unexpected movements -* (Opt): Sense wheel friction to predict unexpected behaviour +- (Opt): Sense wheel friction to predict unexpected behaviour ### Outcome @@ -77,7 +187,7 @@ None ## 2. Unprotected left turn at intersection with oncoming traffic -![img](../../00_assets/TR08.png) +![img](../../assets/TR08.png) ### Description @@ -98,13 +208,13 @@ Global route wants you to perform a left turn at an intersection ### Driving functions -* Sense street signs and traffic lights -* Observe the intersection -* Sense oncoming traffic -* (Check indicator of oncoming traffic) -* Sense pedestrians in your drive path -* Steer the vehicle in a left turn -* Predict if a turn is possible before oncoming traffic reaches the intersection +- Sense street signs and traffic lights +- Observe the intersection +- Sense oncoming traffic +- (Check indicator of oncoming traffic) +- Sense pedestrians in your drive path +- Steer the vehicle in a left turn +- Predict if a turn is possible before oncoming traffic reaches the intersection ### Outcome @@ -124,7 +234,7 @@ Turn left at the intersection without violating traffic rules ## 3. Right turn at an intersection with crossing traffic -![img](../../00_assets/TR09.png) +![img](../../assets/TR09.png) ### Description @@ -144,13 +254,13 @@ Global route wants you to perform a right turn at an intersection ### Driving functions -* Sense street signs and traffic lights -* Observe the intersection -* Sense crossing traffic -* Check indicator of crossing traffic -* Sense pedestrians in your drive path -* Steer the vehicle in a right turn -* Predict if a turn is possible before crossing traffic reaches the intersection +- Sense street signs and traffic lights +- Observe the intersection +- Sense crossing traffic +- Check indicator of crossing traffic +- Sense pedestrians in your drive path +- Steer the vehicle in a right turn +- Predict if a turn is possible before crossing traffic reaches the intersection ### Outcome @@ -170,7 +280,7 @@ Turn right at the intersection without violating traffic rules ## 4. Crossing negotiation at unsignalized intersection -![img](../../00_assets/TR10.png) +![img](../../assets/TR10.png) ### Description @@ -192,10 +302,10 @@ No traffic lights or street signs are sensed and agent is at an intersection ### Driving functions -* Sense street signs and traffic lights -* Observe the intersection -* Sense pedestrians in your drive path -* Steering the vehicle +- Sense street signs and traffic lights +- Observe the intersection +- Sense pedestrians in your drive path +- Steering the vehicle ### Outcome @@ -213,7 +323,7 @@ Cross the intersection without violating traffic rules ## 5. Crossing traffic running a red light at intersection -![img](../../00_assets/TR07.png) +![img](../../assets/TR07.png) ### Description @@ -225,10 +335,10 @@ Vehicle enters intersection while having a red light ### Driving functions -* Sense street signs and traffic lights -* Observe the intersection -* Sense crossing traffic -* Emergency brake +- Sense street signs and traffic lights +- Observe the intersection +- Sense crossing traffic +- Emergency brake ### Outcome @@ -250,7 +360,7 @@ Emergency brake to avoid collision ## 6. 
Highway merge from on-ramp -![img](../../00_assets/TR18.png) +![img](../../assets/TR18.png) ### Description @@ -269,10 +379,10 @@ Vehicle enters a highway ### Driving functions -* Sense speed of surrounding traffic -* Sense length of ramp -* Adjust speed to enter highway -* Turn into highway +- Sense speed of surrounding traffic +- Sense length of ramp +- Adjust speed to enter highway +- Turn into highway ### Outcome @@ -290,7 +400,7 @@ Join the highway traffic without any traffic violation ## 7. Highway cut-in from on-ramp -![img](../../00_assets/TR19.png) +![img](../../assets/TR19.png) ### Description @@ -310,11 +420,11 @@ Vehicle enters a highway ### Driving functions -* Sense speed of surrounding traffic -* Adjust speed to let vehicle enter highway -* Change lane -* Decelerate -* Brake +- Sense speed of surrounding traffic +- Adjust speed to let vehicle enter highway +- Change lane +- Decelerate +- Brake ### Outcome @@ -332,7 +442,7 @@ Let vehicle join the highway traffic without any traffic violation ## 8. Static cut-in -![img](../../00_assets/TR20.png) +![img](../../assets/TR20.png) ### Description @@ -352,11 +462,11 @@ Vehicle tries to cut-in ### Driving functions -* Sense speed of surrounding traffic -* Adjust speed to let vehicle enter lane -* Change lane -* Decelerate -* Brake +- Sense speed of surrounding traffic +- Adjust speed to let vehicle enter lane +- Change lane +- Decelerate +- Brake ### Outcome @@ -374,7 +484,7 @@ Let vehicle join the lane without any traffic violation ## 9. Highway exit -![img](../../00_assets/TR21.png) +![img](../../assets/TR21.png) ### Description @@ -397,12 +507,12 @@ Vehicle leaves a highway ### Driving functions -* Sense speed of surrounding traffic -* Sense distance to off-ramp -* Adjust speed to change lane -* Change lane -* Decelerate -* Brake +- Sense speed of surrounding traffic +- Sense distance to off-ramp +- Adjust speed to change lane +- Change lane +- Decelerate +- Brake ### Outcome @@ -420,7 +530,7 @@ Vehicle exits the highway traffic without any traffic violation ## 10. Yield to emergency vehicle -![img](../../00_assets/TR23.png) +![img](../../assets/TR23.png) ### Description @@ -441,10 +551,10 @@ Emergency vehicle behind us ### Driving functions -* Sense emergency vehicle -* Sense speed of surrounding traffic -* Adjust speed to change lane -* Change lane +- Sense emergency vehicle +- Sense speed of surrounding traffic +- Adjust speed to change lane +- Change lane ### Outcome @@ -460,7 +570,7 @@ Let emergency vehicle pass without any traffic violation ## 11. Obstacle in lane -![img](../../00_assets/TR14.png) +![img](../../assets/TR14.png) ### Description @@ -481,11 +591,11 @@ Obstacle on lane ### Driving functions -* Sense obstacles -* Sense speed of surrounding traffic -* Change lane -* Decelerate -* Brake +- Sense obstacles +- Sense speed of surrounding traffic +- Change lane +- Decelerate +- Brake ### Outcome @@ -515,7 +625,7 @@ Pass an obstacle without any traffic violation ## 12. Door Obstacle -![img](../../00_assets/TR15.png) +![img](../../assets/TR15.png) ### Description @@ -536,11 +646,11 @@ Door opens in lane ### Driving functions -* Sense opening door -* Sense speed of surrounding traffic -* Change lane -* Decelerate -* Brake +- Sense opening door +- Sense speed of surrounding traffic +- Change lane +- Decelerate +- Brake ### Outcome @@ -564,7 +674,7 @@ Pass the open door without any traffic violation ## 13. 
Slow moving hazard at lane edge -![img](../../00_assets/TR16.png) +![img](../../assets/TR16.png) ### Description @@ -585,11 +695,11 @@ slow moving hazard(bicycle) in lane ### Driving functions -* Sense slow moving hazards -* Sense speed of surrounding traffic -* Change lane -* Decelerate -* Brake +- Sense slow moving hazards +- Sense speed of surrounding traffic +- Change lane +- Decelerate +- Brake ### Outcome @@ -613,7 +723,7 @@ Pass the slow moving hazard without any traffic violation ## 14. Vehicle invading lane on bend -![img](../../00_assets/TR22.png) +![img](../../assets/TR22.png) ### Description @@ -632,10 +742,10 @@ Bend in the road and a vehicle invading our lane ### Driving functions -* Sense vehicle on our lane -* Decelerate -* Brake -* Move to right part of lane +- Sense vehicle on our lane +- Decelerate +- Brake +- Move to right part of lane ### Outcome @@ -649,7 +759,7 @@ None ## 15. Longitudinal control after leading vehicle brakes -![img](../../00_assets/TR02.png) +![img](../../assets/TR02.png) ### Description @@ -667,10 +777,10 @@ Vehicle in front suddenly slows down ### Driving functions -* Sense vehicle on our lane -* Sense vehicle speed -* Decelerate -* Emergency-/Brake +- Sense vehicle on our lane +- Sense vehicle speed +- Decelerate +- Emergency-/Brake ### Outcome @@ -692,7 +802,7 @@ Slow down without crashing in vehicle in front of us ## 16. Obstacle avoidance without prior action -![img](../../00_assets/TR03.png) +![img](../../assets/TR03.png) ### Description @@ -709,9 +819,9 @@ Obstacle in front suddenly appears ### Driving functions -* Sense obstacle on our lane -* Decelerate -* Emergency-/Brake +- Sense obstacle on our lane +- Decelerate +- Emergency-/Brake ### Outcome @@ -743,7 +853,7 @@ Slow down without crashing in the obstacle in front of us ## 17. Pedestrian emerging from behind parked vehicle -![img](../../00_assets/TR17.png) +![img](../../assets/TR17.png) ### Description @@ -760,9 +870,9 @@ Pedestrian in front suddenly appears from behind a parked car. ### Driving functions -* Sense pedestrian on our lane -* Decelerate -* Emergency-/Brake +- Sense pedestrian on our lane +- Decelerate +- Emergency-/Brake ### Outcome @@ -786,7 +896,7 @@ Slow down without crashing into the pedestrian in front of us ## 18. Obstacle avoidance with prior action -![img](../../00_assets/TR04.png) +![img](../../assets/TR04.png) ### Description @@ -803,9 +913,9 @@ Obstacle in planned driving path ### Driving functions -* Sense obstacle in driving path -* Decelerate -* Emergency-/Brake +- Sense obstacle in driving path +- Decelerate +- Emergency-/Brake ### Outcome @@ -825,7 +935,7 @@ Slow down without crashing into the obstacle in our path ## 19. Parking Cut-in -![img](../../00_assets/TR12.png) +![img](../../assets/TR12.png) ### Description @@ -842,9 +952,9 @@ Parked car tries to join traffic ### Driving functions -* Sense parked car starts moving -* Decelerate -* Emergency-/Brake +- Sense parked car starts moving +- Decelerate +- Emergency-/Brake ### Outcome @@ -862,7 +972,7 @@ Slow down without crashing into the car joining our lane ## 20. 
Lane changing to evade slow leading vehicle -![img](../../00_assets/TR05.png) +![img](../../assets/TR05.png) ### Description @@ -883,11 +993,11 @@ Speed of car is under a certain threshold ### Driving functions -* Sense speed of traffic -* Sense vehicles in surrounding lanes -* Decelerate -* Emergency-/Brake -* Change lane +- Sense speed of traffic +- Sense vehicles in surrounding lanes +- Decelerate +- Emergency-/Brake +- Change lane ### Outcome @@ -909,7 +1019,7 @@ Change lane without any traffic violations ## 21. Passing obstacle with oncoming traffic -![img](../../00_assets/TR06.png) +![img](../../assets/TR06.png) ### Description @@ -929,14 +1039,14 @@ Obstacle in front of us with oncoming traffic ### Driving functions -* Sense obstacle -* Sense length of obstacle -* Sense speed, distance of oncoming traffic -* Sense vehicles in surrounding lanes -* Decelerate -* Brake -* Change lane -* Rejoin old lane after the obstacle +- Sense obstacle +- Sense length of obstacle +- Sense speed, distance of oncoming traffic +- Sense vehicles in surrounding lanes +- Decelerate +- Brake +- Change lane +- Rejoin old lane after the obstacle ### Outcome @@ -962,7 +1072,7 @@ Maneuver around obstacle without any traffic violations ## 22. Parking Exit -![img](../../00_assets/TR11.png) +![img](../../assets/TR11.png) ### Description @@ -981,11 +1091,11 @@ Ego-vehicle is parked and wants to join traffic ### Driving functions -* Sense space of parking bay -* Sense speed, distance of traffic -* Sense vehicles in lane the agent wants to join -* Accelerate -* Change lane(Join traffic) +- Sense space of parking bay +- Sense speed, distance of traffic +- Sense vehicles in lane the agent wants to join +- Accelerate +- Change lane(Join traffic) ### Outcome diff --git a/doc/research/paf23/acting/autoware_acting.md b/doc/research/paf23/acting/autoware_acting.md new file mode 100644 index 00000000..bf1900a6 --- /dev/null +++ b/doc/research/paf23/acting/autoware_acting.md @@ -0,0 +1,46 @@ +# Research: [Autoware Acting](https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-architecture/control/#autoware-control-design) + +**Summary:** This page contains the research into the action component of Autoware. + +- [Research: Autoware Acting](#research-autoware-acting) + - [Inputs](#inputs) + - [General Component Architecture](#general-component-architecture) + - [With the Control Module](#with-the-control-module) + - [Trajectory Follower](#trajectory-follower) + - [Vehicle Command Gate](#vehicle-command-gate) + - [Outputs](#outputs) + +## Inputs + +- Odometry (position and orientation, from Localization module) +- Trajectory (output of Planning) +- Steering Status (current steering of vehicle, from Vehicle Interface) +- Actuation Status (acceleration, steering, brake actuations, from Vehicle Interface) +- (“vehicle signal commands” directly into Vehicle Interface -> Handbrake, Hazard Lights, Headlights, Horn, Stationary Locking, Turn Indicators, Wipers etc.) 
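The Trajectory Follower described in the next sections tracks the planned velocity with a PID controller. As a rough sketch of one longitudinal PID step, assuming a plain textbook PID; the class name, gains, and the acceleration-command output are illustrative assumptions, not Autoware code.

```python
# Rough sketch, under assumptions: a textbook longitudinal PID step of
# the kind the Trajectory Follower below uses to track the planned
# velocity. Gains and names are illustrative, not Autoware parameters.
class LongitudinalPID:
    def __init__(self, kp: float, ki: float, kd: float):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.integral = 0.0
        self.prev_error = 0.0

    def step(self, target_speed: float, current_speed: float, dt: float) -> float:
        """Return an acceleration command from the speed error."""
        error = target_speed - current_speed
        self.integral += error * dt
        derivative = (error - self.prev_error) / dt
        self.prev_error = error
        return self.kp * error + self.ki * self.integral + self.kd * derivative


pid = LongitudinalPID(kp=0.5, ki=0.1, kd=0.05)  # untuned example gains
print(pid.step(target_speed=8.0, current_speed=6.5, dt=0.05))
```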
+ +### General Component Architecture + +![Node diagram](https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-interfaces/components/images/Control-Bus-ODD-Architecture.drawio.svg) + +### With the Control Module + +![control-component](https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-architecture/control/image/control-component.drawio.svg) + +## [Trajectory Follower](https://autowarefoundation.github.io/autoware.universe/main/control/trajectory_follower_base/) + +- generates control command to follow reference trajectory from Planning +- computes lateral (steering) and longitudinal (velocity) controls separately +- lateral controller: mpc (model predictive) or pure pursuit +- longitudinal: “currently only” PID controller + +## Vehicle Command Gate + +- filters control commands to prevent abnormal values +- sends commands to [Vehicle Interface](https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-interfaces/components/vehicle-interface/) + +## Outputs + +- steering angle +- steering torque +- speed +- acceleration diff --git a/doc/research/paf23/acting/paf21_1_acting.md b/doc/research/paf23/acting/paf21_1_acting.md new file mode 100644 index 00000000..d6aae3d2 --- /dev/null +++ b/doc/research/paf23/acting/paf21_1_acting.md @@ -0,0 +1,45 @@ +# Research: PAF21_1 Acting + +**Summary:** This page contains the research into the action component of the PAF21_1 group. + +- [Research: PAF21\_1 Acting](#research-paf21_1-acting) + - [Inputs](#inputs) + - [Curve Detection](#curve-detection) + - [Speed Control](#speed-control) + - [Steering Control](#steering-control) + - [Straight Trajectories](#straight-trajectories) + - [Detected Curves](#detected-curves) + +## Inputs + +- waypoints of the planned route +- general odometry of the vehicle + +## Curve Detection + +- Can detect curves on the planned trajectory +- Calculates the speed in which to drive the detected Curve +![Curve](../../assets/research_assets/curve_detection_paf21_1.png) + +## Speed Control + +- [CARLA Ackermann Control](https://carla.readthedocs.io/projects/ros-bridge/en/latest/carla_ackermann_control/) +- Speed is forwarded to the CARLA vehicle via Ackermann_message, which already includes a PID controller for safe driving/accelerating etc. +- no further controlling needed -> speed can be passed as calculated + +## Steering Control + +### Straight Trajectories + +- **Stanley Steering Controller** + - Calculates steering angle from offset and heading error + - includes PID controller + ![Stanley Controller](../../assets/research_assets/stanley_paf21_1.png) + +### Detected Curves + +- **Naive Steering Controller** (close to pure pursuit) + - uses Vehicle Position + Orientation + Waypoints + - Calculate direction to drive to as vector + - direction - orientation = Steering angle at each point in time + - speed is calculated in Curve Detection and taken as is diff --git a/doc/03_research/01_acting/04_paf21_2_and_pylot_acting.md b/doc/research/paf23/acting/paf21_2_and_pylot_acting.md similarity index 90% rename from doc/03_research/01_acting/04_paf21_2_and_pylot_acting.md rename to doc/research/paf23/acting/paf21_2_and_pylot_acting.md index b1e76d19..c071338b 100644 --- a/doc/03_research/01_acting/04_paf21_2_and_pylot_acting.md +++ b/doc/research/paf23/acting/paf21_2_and_pylot_acting.md @@ -1,6 +1,33 @@ # PAF Research: Robert Fischer -## PAF22 +**Summary:** This page contains the research into the action component of the PAF21_2 group and pylot. 
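Both groups reviewed below build their lateral control on pure-pursuit-style steering (paf21-2's lateral controller; paf21-1's "naive" controller is close to it). A hedged sketch of the basic pure-pursuit law follows; the wheelbase value and all names are assumptions for illustration.

```python
# Hedged sketch of the pure-pursuit steering law that the notes below
# refer to. The wheelbase value and all names are assumptions.
import math


def pure_pursuit_steering(alpha: float, lookahead: float,
                          wheelbase: float = 2.85) -> float:
    """Steering angle for a target point seen at bearing `alpha` (rad)
    relative to the vehicle heading, `lookahead` metres ahead."""
    return math.atan2(2.0 * wheelbase * math.sin(alpha), lookahead)


# Target point 10 m ahead, 0.2 rad off the current heading:
print(round(pure_pursuit_steering(0.2, 10.0), 3))
```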
+ +- [PAF Research: Robert Fischer](#paf-research-robert-fischer) + - [Acting](#acting) + - [List of Inputs/Outputs](#list-of-inputsoutputs) + - [Challenges](#challenges) + - [PAF21\_2 Acting](#paf21_2-acting) + - [Standardroutine](#standardroutine) + - [Unstuck-Routine](#unstuck-routine) + - [Deadlock](#deadlock) + - [Verfolgung von Hindernissen](#verfolgung-von-hindernissen) + - [Messages](#messages) + - [StanleyController](#stanleycontroller) + - [PID Controller](#pid-controller) + - [Emergency Modus](#emergency-modus) + - [Bugabuses](#bugabuses) + - [Pylot Acting (Control)](#pylot-acting-control) + - [Control Types](#control-types) + - [PID](#pid) + - [MPC](#mpc) + - [Carla\_Autopilot](#carla_autopilot) + - [Basic Cotrol Code](#basic-cotrol-code) + - [**control\_eval\_operator.py**](#control_eval_operatorpy) + - [**messages.py**](#messagespy) + - [**pid.py**](#pidpy) + - [**pid\_control\_operator.py**](#pid_control_operatorpy) + - [**utils.py**](#utilspy) + - [MPC Control Code](#mpc-control-code) ## Acting @@ -9,13 +36,13 @@ - Lateral control - Pure Pursuit controller - ![Untitled](../../00_assets/research_assets/pure_pursuit.png) + ![Untitled](../../assets/research_assets/pure_pursuit.png) - Stanley controller - ![Untitled](../../00_assets/research_assets/stanley_controller.png) + ![Untitled](../../assets/research_assets/stanley_controller.png) -### [List of Inputs/Outputs](https://github.com/una-auxme/paf23/blob/main/doc/03_research/01_acting/02_acting_implementation.md#list-of-inputsoutputs) +### [List of Inputs/Outputs](https://github.com/una-auxme/paf/blob/main/doc/research/acting/acting_implementation.md#list-of-inputsoutputs) - Subscribes to: - [nav_msgs/Odometry Message](http://docs.ros.org/en/noetic/api/nav_msgs/html/msg/Odometry.html) : to get the current position and heading - Publishes: - [CarlaEgoVehicleControl.msg](https://carla.readthedocs.io/projects/ros-bridge/en/latest/ros_msgs/#carlaegovehiclecontrolmsg) : to actually control the vehicle's throttle, steering -### [Challenges](https://github.com/una-auxme/paf23/blob/main/doc/03_research/01_acting/02_acting_implementation.md#challenges) +### [Challenges](https://github.com/una-auxme/paf/blob/main/doc/research/acting/acting_implementation.md#challenges) A short list of challenges for the implementation of a basic acting domain and how these could be tackled based on the requirements mentioned above. @@ -42,7 +69,7 @@ A short list of challenges for the implementation of a basic acting domain and h ### [Standardroutine](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_actor#standardroutine) -![Untitled](../../00_assets/research_assets/standard_routine_paf21_2.png) +![Untitled](../../assets/research_assets/standard_routine_paf21_2.png) - Longitudinal control - PID controller @@ -149,7 +176,7 @@ Timer und Schwellenwerte um Stuck Situation zu erkennen ### [Messages](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_actor#messages) -![Untitled](../../00_assets/research_assets/messages_paf21_2.png) +![Untitled](../../assets/research_assets/messages_paf21_2.png) ### [StanleyController](https://github.com/ll7/paf21-2/tree/main/paf_ros/paf_actor#stanleycontroller) @@ -224,7 +251,7 @@ implements a longitudinal and lateral controller - Predicts future states using a kinematic model to optimize control inputs.
- Parameters include mpc_horizon, mpc_steps, and mpc_weights -![Untitled](../../00_assets/research_assets/mpc.png) +![Untitled](../../assets/research_assets/mpc.png) • cost function can be designed to account for driving comfort diff --git a/doc/03_research/Leaderboard-2/changes_leaderboard2.md b/doc/research/paf23/leaderboard/changes_leaderboard.md similarity index 90% rename from doc/03_research/Leaderboard-2/changes_leaderboard2.md rename to doc/research/paf23/leaderboard/changes_leaderboard.md index 39c23951..aa7e6e91 100644 --- a/doc/03_research/Leaderboard-2/changes_leaderboard2.md +++ b/doc/research/paf23/leaderboard/changes_leaderboard.md @@ -2,21 +2,18 @@ **Summary:** New Features and changes made with the CARLA leaderboard-2.0 ---- - -## Author - -Samuel Kühnel - -## Date - -17.11.2023 +- [Overview leaderboard 2.0](#overview-leaderboard-20) + - [General Information](#general-information) + - [Submissions](#submissions) + - [New Features](#new-features) + - [Maps](#maps) + - [Scenarios and training database](#scenarios-and-training-database) ## General Information Leaderboard 1.0 | Leaderboard 2.0 :-------------------------:|:-------------------------: -![leaderboard-1](../../00_assets/leaderboard-1.png) | ![leaderboard-2](../../00_assets/leaderboard-2.png) +![leaderboard-1](../../assets/leaderboard-1.png) | ![leaderboard-2](../../assets/leaderboard-2.png) As shown in the images above, the new leaderboard seems to have way more traffic than the previous version. The leaderboard 2.0 uses an enhanced version of CARLA 0.9.14. So be aware that even if the documentation mentions this version tag, there are probably features missing. Therefore it is recommended to use the latest version. diff --git a/doc/03_research/02_perception/LIDAR_data.md b/doc/research/paf23/perception/LIDAR_data.md similarity index 65% rename from doc/03_research/02_perception/LIDAR_data.md rename to doc/research/paf23/perception/LIDAR_data.md index 528620dc..121b616b 100644 --- a/doc/03_research/02_perception/LIDAR_data.md +++ b/doc/research/paf23/perception/LIDAR_data.md @@ -1,6 +1,12 @@ # LIDAR-Data -This File discusses where the LIDAR-Data comes from, how its processed and how we could possibly use it. +**Summary:** This file discusses where the LIDAR-Data comes from, how it's processed and how we could possibly use it. + +- [LIDAR-Data](#lidar-data) + - [Origin](#origin) + - [Processing](#processing) + - [Distance Calculation](#distance-calculation) + - [Open questions](#open-questions) ## Origin LIDAR-Data comes in Pointclouds from a specific LIDAR-Topic. `rospy.Subscriber(rospy.get_param('~source_topic', "/carla/hero/LIDAR"), PointCloud2, self.callback)` -Read more about the LIDAR-Sensor [here](https://github.com/una-auxme/paf23/blob/main/doc/06_perception/03_lidar_distance_utility.md) +Read more about the LIDAR-Sensor [here](https://github.com/una-auxme/paf/blob/main/doc/perception/lidar_distance_utility.md) ## Processing The goal is to identify Objects and their distance. Therefore we need to calculate distances from the pointcloud data. To do this, the lidar-distance node first converts pointcloud data to an array, which contains cartesian coordinates.
-`paf23-agent-1 | (76.12445 , -1.6572031e+01, 13.737187 , 0.7287409 )` +`paf-agent-1 | (76.12445 , -1.6572031e+01, 13.737187 , 0.7287409 )` -`paf23-agent-1 | (71.9434 , -1.8718828e+01, 13.107929 , 0.7393809 )` +`paf-agent-1 | (71.9434 , -1.8718828e+01, 13.107929 , 0.7393809 )` -`paf23-agent-1 | (-0.3482422 , -1.6367188e-02, -0.20128906, 0.99839103)` +`paf-agent-1 | (-0.3482422 , -1.6367188e-02, -0.20128906, 0.99839103)` -`paf23-agent-1 | (-0.3486328 , -1.4062500e-02, -0.20152344, 0.99838954)` +`paf-agent-1 | (-0.3486328 , -1.4062500e-02, -0.20152344, 0.99838954)` -`paf23-agent-1 | (-0.35070312, -2.3828126e-03, -0.2025 , 0.99838144)` +`paf-agent-1 | (-0.35070312, -2.3828126e-03, -0.2025 , 0.99838144)` The first three values of each row correspond to x, y, z. diff --git a/doc/03_research/02_perception/05_Research_PAF21-Perception.md b/doc/research/paf23/perception/Research_PAF21-Perception.md similarity index 77% rename from doc/03_research/02_perception/05_Research_PAF21-Perception.md rename to doc/research/paf23/perception/Research_PAF21-Perception.md index f036100c..0e7fa9dc 100644 --- a/doc/03_research/02_perception/05_Research_PAF21-Perception.md +++ b/doc/research/paf23/perception/Research_PAF21-Perception.md @@ -1,8 +1,17 @@ # Sprint 0: Research Samuel Kühnel -## PAF 21-2 +**Summary:** This page contains the research into the perception component of the PAF21_2 group. -### Perception +- [Sprint 0: Research Samuel Kühnel](#sprint-0-research-samuel-kühnel) + - [Perception](#perception) + - [Obstacle detection](#obstacle-detection) + - [TrafficLightDetection](#trafficlightdetection) + - [Problems and solutions](#problems-and-solutions) + - [Resume](#resume) + - [Perception](#perception-1) + - [Planning](#planning) + +## Perception ### Obstacle detection @@ -26,7 +35,7 @@ - Yellow painted traffic lights distort traffic light phase detection → **Solution**: Filter out red and green sections beforehand using masks and convert remaining image to grayscale and add masks again. - **Problem without solution**: European traffic lights can sometimes not be recognized at the stop line. -## Resumee +## Resume ### Perception diff --git a/doc/03_research/02_perception/05-autoware-perception.md b/doc/research/paf23/perception/autoware-perception.md similarity index 70% rename from doc/03_research/02_perception/05-autoware-perception.md rename to doc/research/paf23/perception/autoware-perception.md index fa9f8418..afe99950 100644 --- a/doc/03_research/02_perception/05-autoware-perception.md +++ b/doc/research/paf23/perception/autoware-perception.md @@ -1,8 +1,15 @@ # Autoware Perception +**Summary:** This page contains the research into the perception component of Autoware. + +- [Autoware Perception](#autoware-perception) + - [1.Architecture](#1architecture) + - [2.Detection Mechanisms](#2detection-mechanisms) + - [3. Conclusion](#3-conclusion) + ## 1.Architecture -![image](https://github.com/una-auxme/paf23/assets/102369315/6b3fb964-e650-442a-a674-8e0471d931a9) +![image](https://github.com/una-auxme/paf/assets/102369315/6b3fb964-e650-442a-a674-8e0471d931a9) Focus on: @@ -16,7 +23,7 @@ Autoware's perception is very complex and uses a variety of mechanisms to gather as much information as possible about the surroundings of the car.
-![image](https://github.com/una-auxme/paf23/assets/102369315/23f9699e-85c7-44c6-b9fa-a603dc73afcf) +![image](https://github.com/una-auxme/paf/assets/102369315/23f9699e-85c7-44c6-b9fa-a603dc73afcf) For the perception Autoware mainly uses the following Sensors: diff --git a/doc/03_research/02_perception/06_paf_21_1_perception.md b/doc/research/paf23/perception/paf_21_1_perception.md similarity index 72% rename from doc/03_research/02_perception/06_paf_21_1_perception.md rename to doc/research/paf23/perception/paf_21_1_perception.md index 6d2d0903..fa87bb5f 100644 --- a/doc/03_research/02_perception/06_paf_21_1_perception.md +++ b/doc/research/paf23/perception/paf_21_1_perception.md @@ -1,8 +1,18 @@ # Paf_21_1 - Perception +**Summary:** This page contains the research into the perception component of the PAF21_1 group. + +- [Paf\_21\_1 - Perception](#paf_21_1---perception) + - [1. Architecture](#1-architecture) + - [**Key Features**](#key-features) + - [2. Sensors](#2-sensors) + - [3. Object-Detection](#3-object-detection) + - [4. TrafficLight-Detection](#4-trafficlight-detection) + - [5. Conclusion](#5-conclusion) + ## 1. Architecture -![image](https://github.com/una-auxme/paf23/assets/102369315/07328c78-83d7-425c-802e-8cc49430e6c1) +![image](https://github.com/una-auxme/paf/assets/102369315/07328c78-83d7-425c-802e-8cc49430e6c1) ### **Key Features** diff --git a/doc/03_research/02_perception/04_pylot.md b/doc/research/paf23/perception/pylot.md similarity index 81% rename from doc/03_research/02_perception/04_pylot.md rename to doc/research/paf23/perception/pylot.md index 3b82e29e..69620e39 100644 --- a/doc/03_research/02_perception/04_pylot.md +++ b/doc/research/paf23/perception/pylot.md @@ -1,10 +1,16 @@ # Pylot - Perception -**Authors:** Maximilian Jannack - -**Date:** 12.11.2023 - ---- +**Summary:** This page contains the research into the perception component of pylot. + +- [Pylot - Perception](#pylot---perception) + - [Detection](#detection) + - [Obstacle detection](#obstacle-detection) + - [Traffic light detection](#traffic-light-detection) + - [Lane detection](#lane-detection) + - [Obstacle Tracking](#obstacle-tracking) + - [Depth Estimation](#depth-estimation) + - [Segmentation](#segmentation) + - [Lidar](#lidar) ## [Detection](https://pylot.readthedocs.io/en/latest/perception.detection.html) diff --git a/doc/03_research/03_planning/00_paf23/04_Local_planning_for_first_milestone.md b/doc/research/paf23/planning/Local_planning_for_first_milestone.md similarity index 77% rename from doc/03_research/03_planning/00_paf23/04_Local_planning_for_first_milestone.md rename to doc/research/paf23/planning/Local_planning_for_first_milestone.md index 431ffee9..a212dceb 100644 --- a/doc/03_research/03_planning/00_paf23/04_Local_planning_for_first_milestone.md +++ b/doc/research/paf23/planning/Local_planning_for_first_milestone.md @@ -2,21 +2,15 @@ **Summary:** This document states the implementation plan for the local planning. 
---- - -## Author - -Julius Miller - -## Date - -03.12.2023 +- [Local Planning for first milestone](#local-planning-for-first-milestone) + - [Research](#research) + - [New Architecture for first milestone](#new-architecture-for-first-milestone) ## Research Paper: [Behavior Planning for Autonomous Driving: Methodologies, Applications, and Future Orientation](https://www.researchgate.net/publication/369181112_Behavior_Planning_for_Autonomous_Driving_Methodologies_Applications_and_Future_Orientation) -![Overview_interfaces](../../../00_assets/planning/overview_paper1.png) +![Overview_interfaces](../../../assets/planning/overview_paper1.png) Rule-based planning @@ -49,7 +43,7 @@ Leader, Track-Speed Github: [Decision Making with Behaviour Tree](https://github.com/kirilcvetkov92/Path-planning?source=post_page-----8db1575fec2c--------------------------------) -![github_tree](../../../00_assets/planning/BehaviorTree_medium.png) +![github_tree](../../../assets/planning/BehaviorTree_medium.png) - No Intersection - Collision Detection in behaviour Tree @@ -58,7 +52,7 @@ Paper: [Behavior Trees for decision-making in Autonomous Driving](https://www.diva-portal.org/smash/get/diva2:907048/FULLTEXT01.pdf) -![Behaviour Tree](../../../00_assets/planning/BT_paper.png) +![Behaviour Tree](../../../assets/planning/BT_paper.png) - simple simulation - Car only drives straight @@ -81,17 +75,17 @@ Low Level Decision: - Emergency Brake - ACC -![localplan](../../../00_assets/planning/localplan.png) +![localplan](../../../assets/planning/localplan.png) Scenarios: -![Intersection](../../../00_assets/planning/intersection_scenario.png) +![Intersection](../../../assets/planning/intersection_scenario.png) Left: Behaviour Intersection is triggered for motion planning, acc publishes speed. -> Lower speed is used to approach intersection Right: Behaviour Intersection is used for motion planning, acc is ignored (no object in front) -![Overtake](../../../00_assets/planning/overtaking_scenario.png) +![Overtake](../../../assets/planning/overtaking_scenario.png) Left: Overtake gets triggered to maintain speed, acc is ignored diff --git a/doc/03_research/03_planning/00_paf23/03_PlannedArchitecture.md b/doc/research/paf23/planning/PlannedArchitecture.md similarity index 83% rename from doc/03_research/03_planning/00_paf23/03_PlannedArchitecture.md rename to doc/research/paf23/planning/PlannedArchitecture.md index 172cb9ca..e58578d2 100644 --- a/doc/03_research/03_planning/00_paf23/03_PlannedArchitecture.md +++ b/doc/research/paf23/planning/PlannedArchitecture.md @@ -1,10 +1,19 @@ # Planned Architecture -Provide an overview for a possible planning architecture consisting of Global Planner, Local Planner and Decision Making. +**Summary:** Provide an overview for a possible planning architecture consisting of Global Planner, Local Planner and Decision Making. + +- [Planned Architecture](#planned-architecture) + - [Overview](#overview) + - [Components](#components) + - [Global Plan](#global-plan) + - [Decision Making](#decision-making) + - [Local Plan](#local-plan) + - [Interfaces](#interfaces) + - [Prioritisation](#prioritisation) ## Overview -![overview](../../../00_assets/planning/overview.png) +![overview](../../../assets/planning/overview.png) The **Global Plan** gathers all data relevant to build a copy of the town the car is driving in. It also computes an optimal global path, which includes all waypoints. The Decision Making can order a recalculation of the global path. 
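In the intersection and overtaking scenarios above, the local plan arbitrates between the behaviour speed and the ACC speed. A minimal sketch of that rule, with assumed names (`None` stands in for "no object in front"):

```python
# Minimal sketch of the target-speed arbitration shown in the scenarios
# above: the lower of behaviour speed and ACC speed wins; when the ACC
# sees no object in front it is ignored (None here). Names are assumed.
from typing import Optional


def arbitrate_target_speed(behaviour_speed: float,
                           acc_speed: Optional[float]) -> float:
    if acc_speed is None:  # no leading object: ACC is ignored
        return behaviour_speed
    return min(behaviour_speed, acc_speed)


assert arbitrate_target_speed(8.0, 5.0) == 5.0   # approach intersection
assert arbitrate_target_speed(8.0, None) == 8.0  # overtake, ACC ignored
```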
@@ -19,7 +28,7 @@ Motions like lane changing must be approved by the decision making and they get ### Global Plan -![overview](../../../00_assets/planning/Globalplan.png) +![overview](../../../assets/planning/Globalplan.png) *Map Generator:* Gathers map data from Carla and prepares it for the PrePlanner @@ -69,7 +78,7 @@ See Behaviour Tree. ### Local Plan -![Local Plan](../../../00_assets/planning/localplan.png) +![Local Plan](../../../assets/planning/localplan.png) *Local Preplan:* Segments the global path and calculates the middle of the lane. It is not called in every cycle. @@ -128,4 +137,4 @@ See Behaviour Tree. ### Prioritisation Red: must-have for the next milestone, Orange: needed for future milestones, Green: can already be used or is not that important -![prios](../../../00_assets/planning/prios.png) +![prios](../../../assets/planning/prios.png) diff --git a/doc/03_research/03_planning/00_paf23/01_Planning.md b/doc/research/paf23/planning/Planning.md similarity index 87% rename from doc/03_research/03_planning/00_paf23/01_Planning.md rename to doc/research/paf23/planning/Planning.md index a36283ac..28ca5fb0 100644 --- a/doc/03_research/03_planning/00_paf23/01_Planning.md +++ b/doc/research/paf23/planning/Planning.md @@ -1,12 +1,21 @@ # Planning +**Summary:** This page contains research into the planning component of the PAF21_2 group. + +- [Planning](#planning) + - [What is Planning?](#what-is-planning) + - [PAF21 - 2](#paf21---2) + - [Autoware](#autoware) + - [Resumee](#resumee) + - [Notes](#notes) + ## What is Planning? Finding the optimal path from start to goal, taking into account the static and dynamic conditions and transferring a suitable trajectory to the acting system ### [PAF21 - 2](https://github.com/ll7/paf21-2) -![Planning](../../../00_assets/planning/Planning_paf21.png) +![Planning](../../../assets/planning/Planning_paf21.png) Input: @@ -55,7 +64,7 @@ Map Manager ### [Autoware](https://github.com/autowarefoundation/autoware) -![https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-architecture/planning/](../../../00_assets/planning/Planning.png) +![https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-architecture/planning/](../../../assets/planning/Planning.png) Input: diff --git a/doc/03_research/03_planning/00_paf23/02_PlanningPaf22.md b/doc/research/paf23/planning/PlanningPaf22.md similarity index 68% rename from doc/03_research/03_planning/00_paf23/02_PlanningPaf22.md rename to doc/research/paf23/planning/PlanningPaf22.md index 605605e7..d856b671 100644 --- a/doc/03_research/03_planning/00_paf23/02_PlanningPaf22.md +++ b/doc/research/paf23/planning/PlanningPaf22.md @@ -1,10 +1,24 @@ # Planning in PAF 22 +**Summary:** This page contains research into the planning component of the PAF22 group.
diff --git a/doc/03_research/03_planning/00_paf23/01_Planning.md b/doc/research/paf23/planning/Planning.md
similarity index 87%
rename from doc/03_research/03_planning/00_paf23/01_Planning.md
rename to doc/research/paf23/planning/Planning.md
index a36283ac..28ca5fb0 100644
--- a/doc/03_research/03_planning/00_paf23/01_Planning.md
+++ b/doc/research/paf23/planning/Planning.md
@@ -1,12 +1,21 @@
 # Planning
 
+**Summary:** This page contains research into the planning component of the PAF21-2 group.
+
+- [Planning](#planning)
+  - [What is Planning?](#what-is-planning)
+  - [PAF21 - 2](#paf21---2)
+  - [Autoware](#autoware)
+  - [Resumee](#resumee)
+  - [Notes](#notes)
+
 ## What is Planning?
 
 Finding the optimal path from start to goal, taking into account the static and dynamic conditions and transfering a suitable trajectory to the acting system
 
 ### [PAF21 - 2](https://github.com/ll7/paf21-2)
 
-![Planning](../../../00_assets/planning/Planning_paf21.png)
+![Planning](../../../assets/planning/Planning_paf21.png)
 
 Input:
 
@@ -55,7 +64,7 @@ Map Manager
 
 ### [Autoware](https://github.com/autowarefoundation/autoware)
 
-![https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-architecture/planning/](../../../00_assets/planning/Planning.png)
+![https://autowarefoundation.github.io/autoware-documentation/main/design/autoware-architecture/planning/](../../../assets/planning/Planning.png)
 
 Input:
 
diff --git a/doc/03_research/03_planning/00_paf23/02_PlanningPaf22.md b/doc/research/paf23/planning/PlanningPaf22.md
similarity index 68%
rename from doc/03_research/03_planning/00_paf23/02_PlanningPaf22.md
rename to doc/research/paf23/planning/PlanningPaf22.md
index 605605e7..d856b671 100644
--- a/doc/03_research/03_planning/00_paf23/02_PlanningPaf22.md
+++ b/doc/research/paf23/planning/PlanningPaf22.md
@@ -1,10 +1,24 @@
 # Planning in PAF 22
 
+**Summary:** This page contains research into the planning component of the PAF22 group.
+ [(Github)](https://github.com/ll7/paf22)
+
+- [Planning in PAF 22](#planning-in-paf-22)
+  - [Architecture](#architecture)
+    - [Preplanning](#preplanning)
+    - [Decision Making](#decision-making)
+    - [Local path planning](#local-path-planning)
+  - [Planning documentation](#planning-documentation)
+    - [Preplanning in code](#preplanning-in-code)
+    - [Global Plan in code](#global-plan-in-code)
+    - [Decision Making in code](#decision-making-in-code)
+  - [Conclusion](#conclusion)
+    - [What can be done next](#what-can-be-done-next)
+
 ## Architecture
 
-![overview](../../../00_assets/planning/overview.jpg)
+![overview](../../../assets/planning/overview.jpg)
 
 ### Preplanning
 
diff --git a/doc/03_research/03_planning/00_paf23/09_Research_Pylot_Planning.md b/doc/research/paf23/planning/Research_Pylot_Planning.md
similarity index 89%
rename from doc/03_research/03_planning/00_paf23/09_Research_Pylot_Planning.md
rename to doc/research/paf23/planning/Research_Pylot_Planning.md
index e7277d52..30d046aa 100644
--- a/doc/03_research/03_planning/00_paf23/09_Research_Pylot_Planning.md
+++ b/doc/research/paf23/planning/Research_Pylot_Planning.md
@@ -1,6 +1,9 @@
 # Sprint 0: Research Samuel Kühnel
 
-## Pylot
+**Summary:** This page contains research into the planning component of pylot.
+
+- [Sprint 0: Research Samuel Kühnel](#sprint-0-research-samuel-kühnel)
+  - [Planning](#planning)
 
 ## Planning
 
diff --git a/doc/03_research/03_planning/00_paf23/Testing_frenet_trajectory_planner.md b/doc/research/paf23/planning/Testing_frenet_trajectory_planner.md
similarity index 92%
rename from doc/03_research/03_planning/00_paf23/Testing_frenet_trajectory_planner.md
rename to doc/research/paf23/planning/Testing_frenet_trajectory_planner.md
index ea300c31..ccc40903 100644
--- a/doc/03_research/03_planning/00_paf23/Testing_frenet_trajectory_planner.md
+++ b/doc/research/paf23/planning/Testing_frenet_trajectory_planner.md
@@ -2,15 +2,11 @@
 
 **Summary:** This document summarizes the Frenet Optimal Trajectory planner used in the pylot project
 
----
-
-## Author
-
-Samuel Kühnel
-
-## Date
-
-15.01.2024
+- [Frenet Optimal Trajectory](#frenet-optimal-trajectory)
+  - [Setup](#setup)
+  - [Example Usage](#example-usage)
+  - [Inputs](#inputs)
+  - [Decision](#decision)
 
 ## Setup
 
@@ -33,7 +29,7 @@ A test python file is also located [here](test_traj.py). The below image was gen
 
 The orange points represent a possible object and the blue points the old (left) and new (right) trajectory.
 
-![test_trajectory](../../../00_assets/planning/test_frenet_results.png)
+![test_trajectory](../../../assets/planning/test_frenet_results.png)
 
 ## Inputs
 
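In `test_traj.py` further down in this diff, the `obs` entry of `initial_conditions` encodes the object as an axis-aligned box `[x_min, y_min, x_max, y_max]`, and `wp` holds the waypoints of the path to follow. The hypothetical helper below makes that box convention explicit; the clearance margin is an assumed value, and the function is not part of the planner's API:

```python
# Hypothetical collision check illustrating the obstacle format used by the
# planner: each object is an axis-aligned box [x_min, y_min, x_max, y_max].
import numpy as np


def hits_obstacle(traj: np.ndarray, obs: np.ndarray, clearance: float = 0.5) -> bool:
    """traj: (N, 2) trajectory points; obs: (M, 4) boxes [x_min, y_min, x_max, y_max]."""
    for x_min, y_min, x_max, y_max in obs:
        in_x = (traj[:, 0] >= x_min - clearance) & (traj[:, 0] <= x_max + clearance)
        in_y = (traj[:, 1] >= y_min - clearance) & (traj[:, 1] <= y_max + clearance)
        if np.any(in_x & in_y):
            return True
    return False


# The old trajectory from the test runs straight through the object, which is
# why the planner has to produce the swerving trajectory seen on the right.
obs = np.array(
    [[983.568124548765, 5386.0219828457075, 983.628124548765, 5386.0219828457075]]
)
old_traj = np.c_[
    np.full(50, 983.5889666959667),
    np.linspace(5370.016106881272, 5399.016106881272, 50),
]
print(hits_obstacle(old_traj, obs))  # True -> replanning needed
```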
----
+- [Planning in PAF21-1](#planning-in-paf21-1)
+  - [Global Planner](#global-planner)
+  - [Local Planner](#local-planner)
 
 ## Global Planner
 
diff --git a/doc/03_research/03_planning/00_paf23/test_traj.py b/doc/research/paf23/planning/test_traj.py
similarity index 53%
rename from doc/03_research/03_planning/00_paf23/test_traj.py
rename to doc/research/paf23/planning/test_traj.py
index 97283e6c..1e9edc71 100644
--- a/doc/03_research/03_planning/00_paf23/test_traj.py
+++ b/doc/research/paf23/planning/test_traj.py
@@ -1,18 +1,22 @@
-from frenet_optimal_trajectory_planner.FrenetOptimalTrajectory.fot_wrapper \
-    import run_fot
+from frenet_optimal_trajectory_planner.FrenetOptimalTrajectory.fot_wrapper import (
+    run_fot,
+)
 import numpy as np
 import matplotlib.pyplot as plt
 
-wp = wp = np.r_[[np.full((50), 983.5889666959667)],
-                [np.linspace(5370.016106881272, 5399.016106881272, 50)]].T
+wp = np.r_[
+    [np.full((50), 983.5889666959667)],
+    [np.linspace(5370.016106881272, 5399.016106881272, 50)],
+].T
 
 initial_conditions = {
-    'ps': 0,
-    'target_speed': 6,
-    'pos': np.array([983.5807552562393, 5370.014637890163]),
-    'vel': np.array([5, 1]),
-    'wp': wp,
-    'obs': np.array([[983.568124548765, 5386.0219828457075,
-                      983.628124548765, 5386.0219828457075]])
+    "ps": 0,
+    "target_speed": 6,
+    "pos": np.array([983.5807552562393, 5370.014637890163]),
+    "vel": np.array([5, 1]),
+    "wp": wp,
+    "obs": np.array(
+        [[983.568124548765, 5386.0219828457075, 983.628124548765, 5386.0219828457075]]
+    ),
 }
 
 hyperparameters = {
@@ -39,9 +43,21 @@
     "num_threads": 0,  # set 0 to avoid using threaded algorithm
 }
 
-result_x, result_y, speeds, ix, iy, iyaw, d, s, speeds_x, \
-    speeds_y, misc, costs, success = run_fot(initial_conditions,
-                                             hyperparameters)
+(
+    result_x,
+    result_y,
+    speeds,
+    ix,
+    iy,
+    iyaw,
+    d,
+    s,
+    speeds_x,
+    speeds_y,
+    misc,
+    costs,
+    success,
+) = run_fot(initial_conditions, hyperparameters)
 
 if success:
     print("Success!")
@@ -50,12 +66,18 @@
 
     fig, ax = plt.subplots(1, 2)
     ax[0].scatter(wp[:, 0], wp[:, 1], label="original")
-    ax[0].scatter([983.568124548765, 983.628124548765],
-                  [5386.0219828457075, 5386.0219828457075], label="object")
+    ax[0].scatter(
+        [983.568124548765, 983.628124548765],
+        [5386.0219828457075, 5386.0219828457075],
+        label="object",
+    )
     ax[0].set_xticks([983.518124548765, 983.598124548765])
     ax[1].scatter(result_x, result_y, label="frenet")
-    ax[1].scatter([983.568124548765, 983.628124548765],
-                  [5386.0219828457075, 5386.0219828457075], label="object")
+    ax[1].scatter(
+        [983.568124548765, 983.628124548765],
+        [5386.0219828457075, 5386.0219828457075],
+        label="object",
+    )
     ax[1].set_xticks([983.518124548765, 983.598124548765])
     plt.legend()
     plt.show()
diff --git a/pc_setup_user.sh b/pc_setup_user.sh
index 7d752f35..23b931f9 100755
--- a/pc_setup_user.sh
+++ b/pc_setup_user.sh
@@ -1,7 +1,4 @@
 cd
 mkdir git
 cd git
-git clone https://github.com/una-auxme/paf23.git
-
-cd paf23
-./dc-run-file.sh build/docker-compose.yaml
\ No newline at end of file
+git clone https://github.com/una-auxme/paf.git
diff --git a/xhost_enable.sh b/xhost_enable.sh
deleted file mode 100755
index e80e040d..00000000
--- a/xhost_enable.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-# enable xhost for the current user to allow docker to display graphics
-xhost +local:
\ No newline at end of file